GB_unaryop__abs_bool_bool.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_bool
// op(A') function: GB_tran__abs_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
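// For reference, a sketch of what GB_CAST_OP (p, p) expands to for this
// bool/abs kernel (ABS of a bool is the identity, so the cast and the
// operator both reduce to simple copies):
//
//      {
//          bool aij = Ax [p] ;        (GB_GETA)
//          bool x = (bool) aij ;      (GB_CASTING)
//          Cx [p] = x ;               (GB_OP applied to GB_CX (p))
//      }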
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_bool_bool
(
bool *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ge_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int64)
// A*D function (colscale): GB (_AxD__ge_int64)
// D*A function (rowscale): GB (_DxB__ge_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int64)
// C=scalar+B GB (_bind1st__ge_int64)
// C=scalar+B' GB (_bind1st_tran__ge_int64)
// C=A+scalar GB (_bind2nd__ge_int64)
// C=A'+scalar GB (_bind2nd_tran__ge_int64)
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT64 || GxB_NO_GE_INT64)
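// As a sketch of how the macros above combine: one entry of the numeric
// eWiseMult/eWiseAdd phase for GE_INT64 effectively computes
//
//      int64_t aij = Ax [pA] ;        (GB_GETA)
//      int64_t bij = Bx [pB] ;        (GB_GETB)
//      Cx [pC] = (aij >= bij) ;       (GB_BINOP, with C of type bool)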
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ge_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
transpose.c
|
/*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: transpose
PURPOSE: This program tests the efficiency with which a square matrix
can be transposed and stored in another matrix. The matrices
are distributed identically.
USAGE: Program input is three command line arguments that give the
matrix order, the number of times to repeat the operation
(iterations), and the number of threads to use:
transpose <# threads> <# iterations> <matrix_size> [tile size]
An optional parameter specifies the tile size used to divide the
individual matrix blocks for improved cache and TLB performance.
The output consists of diagnostics to make sure the
transpose worked and timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime() portable wall-timer interface.
bail_out()
test_results() Verify that the transpose worked
HISTORY: Written by Tim Mattson, April 1999.
Updated by Rob Van der Wijngaart, December 2005.
*******************************************************************/
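/* Example invocation (sizes here are only an illustration): transpose a
   1024x1024 matrix 100 times using 4 threads and 32x32 tiles:
       ./transpose 4 100 1024 32
   Each iteration reads A and writes B once, so the reported rate is based on
   moving 2*order*order*sizeof(double) bytes per iteration (the "bytes"
   variable below), averaged over the timed iterations after the warmup pass. */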
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
#define A(i,j) A[i+order*(j)]
#define B(i,j) B[i+order*(j)]
static double test_results (size_t , double*, int);
int main(int argc, char ** argv) {
size_t order; /* order of the matrix */
size_t i, j, it, jt; /* matrix/tile indices */
int Tile_order=32; /* default tile size for tiling of local transpose */
int iterations; /* number of times to do the transpose */
int iter; /* dummy */
int tiling; /* boolean: true if tiling is used */
double bytes; /* combined size of matrices */
double * RESTRICT A; /* buffer to hold original matrix */
double * RESTRICT B; /* buffer to hold transposed matrix */
double abserr; /* absolute error */
double epsilon=1.e-8; /* error tolerance */
double transpose_time,/* timing parameters */
avgtime;
int nthread_input,
nthread;
int num_error=0; /* flag set when the requested and
obtained numbers of threads differ */
/*********************************************************************
** read and test input parameters
*********************************************************************/
printf("Parallel Research Kernels version %s\n", PRKVERSION);
printf("OpenMP Matrix transpose: B = A^T\n");
if (argc != 4 && argc != 5){
printf("Usage: %s <# threads> <# iterations> <matrix order> [tile size]\n",
*argv);
exit(EXIT_FAILURE);
}
/* Take number of threads to request from command line */
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
omp_set_num_threads(nthread_input);
iterations = atoi(*++argv);
if (iterations < 1){
printf("ERROR: iterations must be >= 1 : %d \n",iterations);
exit(EXIT_FAILURE);
}
order = atoi(*++argv);
if (order <= 0){
printf("ERROR: Matrix Order must be greater than 0 : %zu \n", order);
exit(EXIT_FAILURE);
}
if (argc == 5) Tile_order = atoi(*++argv);
/* a non-positive tile size means no tiling of the local transpose */
tiling = (Tile_order > 0) && ((size_t)Tile_order < order);
if (!tiling) Tile_order = order;
/*********************************************************************
** Allocate space for the input and transpose matrix
*********************************************************************/
A = (double *)prk_malloc(order*order*sizeof(double));
if (A == NULL){
printf(" ERROR: cannot allocate space for input matrix: %zu\n",
order*order*sizeof(double));
exit(EXIT_FAILURE);
}
B = (double *)prk_malloc(order*order*sizeof(double));
if (B == NULL){
printf(" ERROR: cannot allocate space for output matrix: %zu\n",
order*order*sizeof(double));
exit(EXIT_FAILURE);
}
bytes = 2.0 * sizeof(double) * order * order;
#pragma omp parallel private (i, j, it, jt, iter)
{
#pragma omp master
{
nthread = omp_get_num_threads();
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %i;\n",nthread_input);
printf("Matrix order = %zu\n", order);
printf("Number of iterations = %d\n", iterations);
if (tiling) {
printf("Tile size = %d\n", Tile_order);
#if COLLAPSE
printf("Loop collapse = on\n");
#else
printf("Loop collapse = off\n");
#endif
}
else
printf("Untiled\n");
}
}
bail_out(num_error);
/* Fill the original matrix, set transpose to known garbage value. */
if (tiling) {
#if COLLAPSE
#pragma omp for collapse(2)
#else
#pragma omp for
#endif
for (j=0; j<order; j+=Tile_order)
for (i=0; i<order; i+=Tile_order)
for (jt=j; jt<MIN(order,j+Tile_order);jt++)
for (it=i; it<MIN(order,i+Tile_order); it++){
A(it,jt) = (double) (order*jt + it);
B(it,jt) = 0.0;
}
}
else {
#pragma omp for
for (j=0;j<order;j++)
for (i=0;i<order; i++) {
A(i,j) = (double) (order*j + i);
B(i,j) = 0.0;
}
}
for (iter = 0; iter<=iterations; iter++){
/* start timer after a warmup iteration */
if (iter == 1) {
#pragma omp barrier
#pragma omp master
{
transpose_time = wtime();
}
}
/* Transpose the matrix */
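/* Note: each pass adds the current A into B^T and then increments every
   element of A by one.  After the warmup pass plus "iterations" timed
   passes, B(j,i) = (order*j+i)*(iterations+1) + iterations*(iterations+1)/2,
   which is exactly what test_results() checks against. */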
if (!tiling) {
#pragma omp for
for (i=0;i<order; i++)
for (j=0;j<order;j++) {
B(j,i) += A(i,j);
A(i,j) += 1.0;
}
}
else {
#if COLLAPSE
#pragma omp for collapse(2)
#else
#pragma omp for
#endif
for (i=0; i<order; i+=Tile_order)
for (j=0; j<order; j+=Tile_order)
for (it=i; it<MIN(order,i+Tile_order); it++)
for (jt=j; jt<MIN(order,j+Tile_order);jt++) {
B(jt,it) += A(it,jt);
A(it,jt) += 1.0;
}
}
} /* end of iter loop */
#pragma omp barrier
#pragma omp master
{
transpose_time = wtime() - transpose_time;
}
} /* end of OpenMP parallel region */
abserr = test_results (order, B, iterations);
prk_free(B);
prk_free(A);
/*********************************************************************
** Analyze and output results.
*********************************************************************/
if (abserr < epsilon) {
printf("Solution validates\n");
avgtime = transpose_time/iterations;
printf("Rate (MB/s): %lf Avg time (s): %lf\n",
1.0E-06 * bytes/avgtime, avgtime);
#if VERBOSE
printf("Squared errors: %f \n", abserr);
#endif
exit(EXIT_SUCCESS);
}
else {
printf("ERROR: Aggregate squared error %lf exceeds threshold %e\n",
abserr, epsilon);
exit(EXIT_FAILURE);
}
} /* end of main */
/* function that computes the error committed during the transposition */
double test_results (size_t order, double *B, int iterations) {
double abserr=0.0;
size_t i, j;
double addit = ((double)(iterations+1) * (double) (iterations))/2.0;
#pragma omp parallel for reduction(+:abserr)
for (j=0;j<order;j++) {
for (i=0;i<order; i++) {
abserr += ABS(B(i,j) - ((i*order + j)*(iterations+1L)+addit));
}
}
#if VERBOSE
#pragma omp master
{
printf(" Squared sum of differences: %f\n",abserr);
}
#endif
return abserr;
}
|
GB_binop__eq_fp32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__eq_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__eq_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fp32)
// A*D function (colscale): GB (_AxD__eq_fp32)
// D*A function (rowscale): GB (_DxB__eq_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fp32)
// C=scalar+B GB (_bind1st__eq_fp32)
// C=scalar+B' GB (_bind1st_tran__eq_fp32)
// C=A+scalar GB (_bind2nd__eq_fp32)
// C=A'+scalar GB (_bind2nd_tran__eq_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32)
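// As a sketch of the bind1st/bind2nd kernels below: bind1st fixes the scalar
// as the first ('x') argument and bind2nd fixes it as the second ('y'), so for
// EQ_FP32 the per-entry work is effectively
//
//      Cx [p] = (x == Bx [p]) ;       (_bind1st__eq_fp32)
//      Cx [p] = (Ax [p] == y) ;       (_bind2nd__eq_fp32)
//
// EQ is commutative (GB_BINOP_FLIP is 0), so no flipped variant is needed.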
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__eq_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__eq_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__eq_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lstm_bwd.c
|
#include <libxsmm.h>
#include <libxsmm_intrinsics_x86.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include "lstm_bwd.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
#define CHKERR_LIBXSMM_DNN(A) if ( A != LIBXSMM_DNN_SUCCESS ) fprintf(stderr, "%s\n", libxsmm_dnn_get_error(A) );
#if 0
# define PRINT_LAYOUT2(DESC, LAYOUT) print_layout2(DESC, LAYOUT)
#else
# define PRINT_LAYOUT2(DESC, LAYOUT)
#endif
void print_layout2(char *desc, libxsmm_dnn_tensor_datalayout *layout) {
char *dim_name[] = {"N", "H", "W", "C", "K", "R", "S", "X", "RLM", "RLK", "RLN"};
int i;
printf("%s: F:%d IF:%d TT: %d [", desc, layout->format, layout->custom_format, layout->tensor_type);
for(i = layout->num_dims - 1; i >= 0; i--) {
printf("%s:%d%s", dim_name[layout->dim_type[i]], layout->dim_size[i], i == 0 ? "" : ", ");
}
printf("]\n");
}
void zero_buf(float* buf, size_t size) {
int i;
#if defined(_OPENMP)
# pragma omp parallel for private(i)
#endif
for (i = 0; i < (int)size; ++i) {
buf[i] = 0.0f;
}
}
void* lstm_bwd_create( int N, /* minibatch size */
int C, /* input size */
int K, /* output size */
int t, /* timesteps = 1 */
int nThreads, /* number of threads */
const int w_in_kcck,
const int w_in_trans,
const float *xt,
const float *csp,
const float *hp,
const float *ht,
const float *w,
const float *r,
const float *cst,
const float *it,
const float *ft,
const float *ot,
const float *cit,
const float *cot,
const float *dcs,
const float *dht,
float *dxt,
float *dcspt,
float *dhpt,
float *dw,
float *dr,
float *db )
{
libxsmm_dnn_rnncell_desc lstmcell_desc;
libxsmm_dnn_rnncell* libxsmm_handle;
libxsmm_dnn_tensor* libxsmm_input;
libxsmm_dnn_tensor* libxsmm_cs_prev;
libxsmm_dnn_tensor* libxsmm_hidden_state_prev;
libxsmm_dnn_tensor* libxsmm_hidden_state = NULL;
libxsmm_dnn_tensor* libxsmm_weight;
libxsmm_dnn_tensor* libxsmm_recur_weight;
libxsmm_dnn_tensor* libxsmm_cs;
libxsmm_dnn_tensor* libxsmm_i;
libxsmm_dnn_tensor* libxsmm_f;
libxsmm_dnn_tensor* libxsmm_o;
libxsmm_dnn_tensor* libxsmm_ci;
libxsmm_dnn_tensor* libxsmm_co;
libxsmm_dnn_tensor* libxsmm_dinput;
libxsmm_dnn_tensor* libxsmm_dcs_prev;
libxsmm_dnn_tensor* libxsmm_dhidden_state_prev;
libxsmm_dnn_tensor* libxsmm_dweight;
libxsmm_dnn_tensor* libxsmm_drecur_weight;
libxsmm_dnn_tensor* libxsmm_dbias;
libxsmm_dnn_tensor* libxsmm_dcs;
libxsmm_dnn_tensor* libxsmm_dhidden_state;
libxsmm_dnn_tensor_datalayout* libxsmm_layout;
libxsmm_dnn_err_t status;
if (N <= 0) {
printf("N: %d should be > 0\n", N);
}
if (C <= 0) {
printf("C: %d should be > 0\n", C);
}
if (K <= 0) {
printf("K: %d should be > 0\n", K);
}
if (xt == 0 || csp == 0 || hp == 0 || w == 0 || r == 0 || (t > 1 && ht == 0) ||
cst == 0 || it == 0 || ft == 0 || ot == 0 || cit == 0 || cot == 0 ||
dxt == 0 || dcspt== 0|| dhpt== 0|| dw == 0 || dr == 0 || db == 0 || dht == 0 || dcs == 0) {
printf("None of the pointers should be NULL::\n");
printf("x:%p\n", xt);
printf("csp:%p\n", csp);
printf("h_prev:%p\n", hp);
printf("ht:%p\n", ht);
printf("w:%p\n", w);
printf("r:%p\n", r);
printf("cs:%p\n", cst);
printf("i:%p\n", it);
printf("f:%p\n", ft);
printf("o:%p\n", ot);
printf("ci:%p\n", cit);
printf("co:%p\n", cot);
printf("dcs:%p\n", dcs);
printf("dxt:%p\n", dxt);
printf("dcspt:%p\n", dcspt);
printf("dhpt:%p\n", dhpt);
printf("dw:%p\n", dw);
printf("dr:%p\n", dr);
printf("db:%p\n", db);
printf("dht:%p\n", dht);
}
/* setup LIBXSMM handle */
lstmcell_desc.threads = nThreads;
lstmcell_desc.N = N;
lstmcell_desc.C = C;
lstmcell_desc.K = K;
lstmcell_desc.max_T = t;
lstmcell_desc.bn = 24;
if(N % 24 == 0) lstmcell_desc.bn = 24;
else if(N % 16 == 0) lstmcell_desc.bn = 16;
else if(N % 12 == 0) lstmcell_desc.bn = 12;
else if(N % 8 == 0) lstmcell_desc.bn = 8;
else if(N % 6 == 0) lstmcell_desc.bn = 6;
lstmcell_desc.bc = 64;
lstmcell_desc.bk = 64;
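/* bn is the minibatch blocking factor: the chain above picks the largest of
   {24,16,12,8,6} that divides N (and keeps the default of 24 otherwise);
   bc and bk block the input (C) and output (K) feature dimensions at 64 */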
lstmcell_desc.cell_type = LIBXSMM_DNN_RNNCELL_LSTM;
lstmcell_desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32;
lstmcell_desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32;
lstmcell_desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NC;
lstmcell_desc.filter_format = (w_in_kcck ? LIBXSMM_DNN_TENSOR_FORMAT_CKPACKED : LIBXSMM_DNN_TENSOR_FORMAT_CK);
libxsmm_handle = libxsmm_dnn_create_rnncell( lstmcell_desc, &status );
CHKERR_LIBXSMM_DNN( status );
/* setup LIBXSMM buffers and filter */
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Xt", libxsmm_layout);
libxsmm_input = libxsmm_dnn_link_tensor( libxsmm_layout, xt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CSP", libxsmm_layout);
libxsmm_cs_prev = libxsmm_dnn_link_tensor( libxsmm_layout, csp, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("HP", libxsmm_layout);
libxsmm_hidden_state_prev = libxsmm_dnn_link_tensor( libxsmm_layout, hp, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
if(t > 1) {
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("HT", libxsmm_layout);
libxsmm_hidden_state = libxsmm_dnn_link_tensor( libxsmm_layout, ht, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
}
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, w_in_trans ? LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS : LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("W", libxsmm_layout);
libxsmm_weight = libxsmm_dnn_link_tensor( libxsmm_layout, w, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, w_in_trans ? LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS : LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("R", libxsmm_layout);
libxsmm_recur_weight = libxsmm_dnn_link_tensor( libxsmm_layout, r, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CSt", libxsmm_layout);
libxsmm_cs = libxsmm_dnn_link_tensor( libxsmm_layout, cst, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("It", libxsmm_layout);
libxsmm_i = libxsmm_dnn_link_tensor( libxsmm_layout, it, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Ft", libxsmm_layout);
libxsmm_f = libxsmm_dnn_link_tensor( libxsmm_layout, ft, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Ot", libxsmm_layout);
libxsmm_o = libxsmm_dnn_link_tensor( libxsmm_layout, ot, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CIt", libxsmm_layout);
libxsmm_ci = libxsmm_dnn_link_tensor( libxsmm_layout, cit, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("COt", libxsmm_layout);
libxsmm_co = libxsmm_dnn_link_tensor( libxsmm_layout, cot, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dXt", libxsmm_layout);
libxsmm_dinput = libxsmm_dnn_link_tensor( libxsmm_layout, dxt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dCSPt", libxsmm_layout);
libxsmm_dcs_prev = libxsmm_dnn_link_tensor( libxsmm_layout, dcspt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dHPt", libxsmm_layout);
libxsmm_dhidden_state_prev = libxsmm_dnn_link_tensor( libxsmm_layout, dhpt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dW", libxsmm_layout);
libxsmm_dweight = libxsmm_dnn_link_tensor( libxsmm_layout, dw, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dR", libxsmm_layout);
libxsmm_drecur_weight = libxsmm_dnn_link_tensor( libxsmm_layout, dr, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dB", libxsmm_layout);
libxsmm_dbias = libxsmm_dnn_link_tensor( libxsmm_layout, db, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dCS", libxsmm_layout);
libxsmm_dcs = libxsmm_dnn_link_tensor( libxsmm_layout, dcs, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dHt", libxsmm_layout);
libxsmm_dhidden_state = libxsmm_dnn_link_tensor( libxsmm_layout, dht, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* bind buffers and filter to handle */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_input, LIBXSMM_DNN_RNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_cs_prev, LIBXSMM_DNN_RNN_REGULAR_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_hidden_state_prev, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV ) );
if(t > 1) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_hidden_state, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE ) );
}
if(w_in_trans) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_weight, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_recur_weight, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS ) );
} else {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_weight, LIBXSMM_DNN_RNN_REGULAR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_recur_weight, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_cs, LIBXSMM_DNN_RNN_REGULAR_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_i, LIBXSMM_DNN_RNN_INTERNAL_I ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_f, LIBXSMM_DNN_RNN_INTERNAL_F ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_o, LIBXSMM_DNN_RNN_INTERNAL_O ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_ci, LIBXSMM_DNN_RNN_INTERNAL_CI ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_co, LIBXSMM_DNN_RNN_INTERNAL_CO ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dinput, LIBXSMM_DNN_RNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dcs_prev, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dhidden_state_prev, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dweight, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_drecur_weight, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dbias, LIBXSMM_DNN_RNN_GRADIENT_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dcs, LIBXSMM_DNN_RNN_GRADIENT_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dhidden_state, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE ) );
size_t scratch_size = libxsmm_dnn_rnncell_get_scratch_size( libxsmm_handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, &status );
CHKERR_LIBXSMM_DNN( status );
if (scratch_size > 0) {
void* scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_scratch( libxsmm_handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, scratch ) );
zero_buf( (float*)scratch, scratch_size/4 );
}
return (void*)libxsmm_handle;
}
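/* A typical calling sequence for this file's API (a sketch only; allocating
   and initializing the tensor buffers is up to the caller):

       void *h = lstm_bwd_create( N, C, K, t, nThreads, w_in_kcck, w_in_trans,
                                  xt, csp, hp, ht, w, r, cst, it, ft, ot, cit,
                                  cot, dcs, dht, dxt, dcspt, dhpt, dw, dr, db );
       lstm_bwd_execute_omp( h );     (or lstm_bwd_execute_st( h, tid ) per thread)
       lstm_bwd_destroy( h );

   lstm_bwd_set_ptr() below can rebind the buffers between calls without
   recreating the handle. */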
void lstm_bwd_set_ptr( void* libxsmm_handle_, int w_in_trans,
const int t,
const float *xt,
const float *csp,
const float *hp,
const float *ht,
const float *w,
const float *r,
const float *cst,
const float *it,
const float *ft,
const float *ot,
const float *cit,
const float *cot,
const float *dcs,
const float *dht,
float *dxt,
float *dcspt,
float *dhpt,
float *dw,
float *dr,
float *db )
{
libxsmm_dnn_err_t status = LIBXSMM_DNN_SUCCESS;
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
if (xt == 0 || csp == 0 || hp == 0 || w == 0 || r == 0 ||
cst == 0 || it == 0 || ft == 0 || ot == 0 || cit == 0 || cot == 0 ||
dxt == 0 || dcspt== 0|| dhpt== 0|| dw == 0 || dr == 0 || db == 0 || dht == 0 || dcs == 0) {
printf("None of the pointers should be NULL::\n");
printf("x:%p\n", xt);
printf("csp:%p\n", csp);
printf("h_prev:%p\n", hp);
printf("ht:%p\n", ht);
printf("w:%p\n", w);
printf("r:%p\n", r);
printf("cs:%p\n", cst);
printf("i:%p\n", it);
printf("f:%p\n", ft);
printf("o:%p\n", ot);
printf("ci:%p\n", cit);
printf("co:%p\n", cot);
printf("dcs:%p\n", dcs);
printf("dxt:%p\n", dxt);
printf("dcspt:%p\n", dcspt);
printf("dhpt:%p\n", dhpt);
printf("dw:%p\n", dw);
printf("dr:%p\n", dr);
printf("db:%p\n", db);
printf("dht:%p\n", dht);
}
/* bind buffers and filter to handle */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_set_sequence_length( handle, t) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status), xt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status), csp) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status), hp) );
if(ht != 0) { CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status), ht) ); }
if(w_in_trans) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status), w) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status), r) );
} else {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status), w) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status), r) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status), cst) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status), it) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status), ft) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status), ot) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status), cit) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status), cot) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status), dxt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status), dcspt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status), dhpt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status), dw) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status), dr) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status), db) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status), dcs) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status), dht) );
}
void lstm_bwd_execute_omp( void* libxsmm_handle_ )
{
#ifdef _OPENMP
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
/* run LIBXSMM LSTM BWD */
#pragma omp parallel
{
int tid = omp_get_thread_num();
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_execute_st( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, tid ) );
}
#else
printf("%s:%d Shouldn't come here... exiting\n", __FILE__, __LINE__);
exit(1);
#endif
}
void lstm_bwd_execute_st( void* libxsmm_handle_, int tid )
{
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
/* run LIBXSMM LSTM BWD */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_execute_st( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, tid ) );
}
void lstm_bwd_destroy( void* libxsmm_handle_ )
{
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
libxsmm_dnn_err_t status = LIBXSMM_DNN_SUCCESS;
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status) ) );
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_I ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_F ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_O ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_CI ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_CO ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE ) );
size_t scratch_size = libxsmm_dnn_rnncell_get_scratch_size( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, &status );
if (scratch_size > 0) {
void *scratch = libxsmm_dnn_rnncell_get_scratch_ptr( handle, &status );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_scratch( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD ) );
if(scratch) libxsmm_free(scratch);
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_rnncell( handle ) );
}
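/* Hedged usage sketch (added for illustration, not part of the original source):
 * the wrappers above are assumed to be driven in this order -- data pointers are
 * bound via the setter, the BWD/UPD pass is executed (threaded or single-threaded),
 * and the handle is torn down last. lstm_bwd_run_example() is a hypothetical helper. */
void lstm_bwd_run_example( void* libxsmm_handle_, int use_openmp, int tid )
{
  if (use_openmp) {
    lstm_bwd_execute_omp( libxsmm_handle_ );     /* opens its own parallel region */
  } else {
    lstm_bwd_execute_st( libxsmm_handle_, tid ); /* caller supplies the thread id */
  }
  lstm_bwd_destroy( libxsmm_handle_ );           /* destroys tensors, scratch, handle */
}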
ops.h
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <system/op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <system/Environment.h>
#include <loops/summarystatsreduce.h>
#include <loops/ReduceType.h>
#define MIN_V 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(const X *dx, const Nd4jLong *xShapeBuffer, Z *result, const Nd4jLong *resultShapeBuffer, X *extraParams, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(const X *dx, const Nd4jLong *xShapeBuffer, Z *result, const Nd4jLong *resultShapeBuffer, X *extraParams, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(const X *dx, const Nd4jLong *xShapeBuffer, X *result, const Nd4jLong *resultShapeBuffer, X *extraParams, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(const X *dx, const Nd4jLong *xShapeBuffer, Z *result, const Nd4jLong *resultShapeBuffer, Z *extraParams, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(const X *x, const Nd4jLong *xShapeInfo, Z *extraParams, Z *result, const Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(const X *x, const Nd4jLong *xShapeInfo, X *extraParams, Z *result, const Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(const X *x, const Nd4jLong *xShapeInfo, X *extraParams, X *result, const Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeBuffer, Z *result, const Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeBuffer, Z *result, const Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeBuffer, X *result, const Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeBuffer,Z *result, const Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeInfo, X *extraParams, X *result, const Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeInfo, X *extraParams, Z *result, const Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo, const Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(const X *dx, const Nd4jLong *xShapeInfo, Z *extraParams, Z *result, const Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo, const Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
namespace functions {
namespace indexreduce {
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template <typename X, typename Y, typename Z>
class Add {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 + params[0]);
}
op_def static X startingValue() {
return static_cast<X>(0.f);
}
};
template <typename X, typename Y>
class NewAdd {
public:
op_def static X op(X d1, Y d2, X *params) {
return d1 + d2;
}
};
template <typename X, typename Y, typename Z>
class Subtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 - params[0]);
}
};
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
op_def static Z op(X d1, Y d2) {
auto d = static_cast<Z>(d1 - d2);
return d * d;
}
op_def static Z op(X d1, Y d2, Z *params) {
auto d = static_cast<Z>(d1 - d2);
return d * d;
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto d = static_cast<Z>(d1 - params[0]);
return d * d;
}
};
template <typename X, typename Y, typename Z>
class SquaredReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
auto d = static_cast<Z>(d2 - d1);
return d * d;
}
op_def static Z op(X d1, Y d2, Z *params) {
auto d = static_cast<Z>(d2 - d1);
return d * d;
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto d = static_cast<Z>(params[0] - d1);
return d * d;
}
};
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] - d1);
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (sd::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * sd::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * sd::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (sd::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * sd::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * sd::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
return (zz * sd::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * sd::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
op_def static X op(X z, Y *params) {
return (sd::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * sd::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * sd::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
template <typename X, typename Y, typename Z>
class LogPoissonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (sd::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (sd::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps
op_def static Z op(X z, Y *params) {
return (sd::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
template <typename X, typename Y, typename Z>
class Multiply {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 * params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1.f);
}
};
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
class DivideNoNan {
public:
op_def static Z op(X d1, Y d2) {
if (d2 == (Y)0) return (Z)0;
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if (d2 == (Y)0) return (Z)0;
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if (params[0] == (Y)0) return (Z)0;
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if(params[0] == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1) {
return sd::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return sd::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return sd::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return sd::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = sd::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : sd::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = sd::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : sd::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
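// Added worked example (illustrative, assuming nd4j_fmod follows C fmod semantics):
// for d1 = -3 and d2 = 5, fmod(-3, 5) = -3 and the operands differ in sign, so the
// branch above returns fmod(-3 + 5, 5) = 2. FloorMod therefore tracks the divisor's
// sign (Python-style %), while FMod keeps the dividend's sign and would return -3.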
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class Axpy {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X>
class IntOr {
public:
op_def static X op(X d1, X d2) {
return d2 | d1;
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X>
class IntAnd {
public:
op_def static X op(X d1, X d2) {
return d2 & d1;
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X>
class IntXor {
public:
op_def static X op(X d1, X d2) {
return d2 ^ d1;
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X>
class ShiftLeft {
public:
op_def static X op(X d1, X d2) {
return d1 << d2;
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X>
class ShiftRight {
public:
op_def static X op(X d1, X d2) {
return d1 >> d2;
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X>
class CyclicShiftLeft {
public:
op_def static X op(X d1, X d2) {
return sd::math::nd4j_rotl<X>(d1, d2);
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X>
class CyclicShiftRight {
public:
op_def static X op(X d1, X d2) {
return sd::math::nd4j_rotr<X>(d1, d2);
}
op_def static X op(X d1, X d2, X *params) {
return op(d1, d2);
}
};
template <typename X, typename Z>
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Z>
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
op_def static Z op(X d1, Y d2) {
return !((int) d1 && (int) d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<X>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<X>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(Y d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) | static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<X>(119);
}
};
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just an optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
/**
* Whether two elements in an array
* are equal within an epsilon tolerance
*/
template <typename X, typename Z>
class Epsilon {
public:
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = sd::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN_V))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
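// Hedged usage sketch (added for illustration, not part of the original header):
// Epsilon compares two scalars against the MIN_V (1e-12) tolerance defined above
// and yields 1 when they match within that tolerance, 0 otherwise. The helper
// name is hypothetical.
static inline bool epsilonEqualExample(float a, float b) {
    // the pairwise overload above ignores extraParams, so two arguments are enough
    return simdOps::Epsilon<float, bool>::op(a, b);
}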
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_abs<X>(d1);
}
};
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_ceil<X,X>(d1);
}
};
template <typename X>
class Cosine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_cos<X,X>(d1);
}
};
template <typename X>
class Exp {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_exp<X, X>(d1);
}
};
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
template <typename X>
class HardTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 < static_cast<X>(-1))
return static_cast<X>(-1);
else if (d1 > static_cast<X>(1))
return static_cast<X>(1);
else
return d1;
}
};
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_floor<X,X>(d1);
}
};
template <typename X>
class Log {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_log<X, X>(d1);
}
};
template <typename X>
class Log1p {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_log<X, X>(1 + d1);
}
};
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_log<X, Z>(d1) / sd::math::nd4j_log<Y, Z>(d2) ;
}
};
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(sd::DataTypeUtils::min<float16>());
else return d1;
}
};
template <typename X>
class StabilizeX {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return sd::DataTypeUtils::min<X>();
else return d1;
}
};
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
template <typename X>
class Erf {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_erf<X,X>(d1);
}
};
template <typename X>
class Erfc {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_erfc<X,X>(d1);
}
};
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return sd::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return sd::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return sd::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return sd::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (sd::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return sd::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (sd::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class ReversePow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return sd::math::nd4j_pow<X, X, Z>(params[0], d1);
}
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_pow<X, Y, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_pow<X, Y, Z>(d2, d1);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return sd::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * sd::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * sd::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * sd::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class IGamma {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return sd::math::nd4j_igamma<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_igamma<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_igamma<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class IGammac {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return sd::math::nd4j_igammac<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_igammac<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_igammac<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
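// Added note (assumption): nd4j_igamma / nd4j_igammac are taken here to be the
// regularized lower / upper incomplete gamma functions, in which case
// IGamma::op(a, x) + IGammac::op(a, x) == 1 for the same arguments.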
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_round<X,X>(d1);
}
};
template <typename X, typename Z>
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return sd::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsNegative {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 < (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return sd::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return sd::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction != static_cast<X>(0);
}
};
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return sd::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction != static_cast<X>(0);
}
};
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X _v = (X) d2;
if (d1 > _v)
return _v;
else if (d1 < -_v)
return -_v;
else return d1;
}
};
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * sd::math::nd4j_sigmoid<X,X>(d1);
}
};
template <typename X>
class Mish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * sd::math::nd4j_tanh<X,X>(sd::math::nd4j_softplus<X,X>(d1));
}
};
template <typename X>
class MishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto ex = sd::math::nd4j_exp<X,X>(d1);
auto e2x = ex * ex;
auto e3x = ex * ex * ex;
return (ex * (4 * (d1 + 1) + 4 * e2x + e3x + ex *(4 * d1 + 6))) / sd::math::nd4j_pow<X, X, X>((2 * ex + e2x + 2), (X) 2.f);
}
};
template <typename X>
class GELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * sd::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1);
}
};
template <typename X>
class PreciseGELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto sp = sd::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI));
auto xp = d1 + sd::math::nd4j_pow<X, X, X>(static_cast<X>(0.044715) * d1, static_cast<X>(3));
return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + sd::math::nd4j_tanh<X, X>(sp * xp));
}
};
template <typename X>
class GELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x17 = static_cast<X>(1.702f) * d1;
auto ep = sd::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17);
// (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2
return (ep * (static_cast<X>(1.f) + ep + x17)) / sd::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2);
}
};
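// Added sanity check (illustrative): at d1 = 0 the expression above gives
// ep = e^0 = 1, so (1 * (1 + 1 + 0)) / (1 + 1)^2 = 0.5, which matches the slope
// of x * sigmoid(1.702 x) at the origin.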
template <typename X>
class PreciseGELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x79 = static_cast<X>(0.797885) * d1;
auto x03 = static_cast<X>(0.0356774) * sd::math::nd4j_pow<X, int, X>(d1, 3);
auto x39 = static_cast<X>(0.398942) * d1;
auto x05 = static_cast<X>(0.0535161) * sd::math::nd4j_pow<X, int, X>(d1, 3);
auto scz = sd::math::nd4j_sech<X, X>(x79 + x03);
// 0.5 + (0.398942 x + 0.0535161 x^3) Sech[0.797885 x + 0.0356774 x^3]^2 + 0.5 Tanh[0.797885 x + 0.0356774 x^3]
return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * sd::math::nd4j_tanh<X, X>(x79 + x03);
}
};
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = sd::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / sd::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_log<X, X>(sd::math::nd4j_sigmoid<X, X>(d1));
}
};
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = sd::math::nd4j_pow<X, X, X>(M_E, d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_sigmoid<X, X>(d1);
}
};
template <typename X>
class Affine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return params[0] * d1 + params[1];
}
};
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_min<X>(static_cast<X>(1), sd::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale the input to lie between a min and a max
*/
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
return d1;
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + sd::math::nd4j_exp<X, X>(-d1));
return (sd::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (sd::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
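// Hedged usage sketch (added for illustration, not part of the original header):
// SetRange expects params = { min, max }; values already inside the range pass
// through unchanged, everything else is remapped into [min, max]. The helper name
// and the 0..10 range are made up for the example.
static inline float setRangeExample(float value) {
    float params[2] = { 0.0f, 10.0f };
    return simdOps::SetRange<float>::op(value, params);
}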
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_sin<X,X>(d1);
}
};
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return sd::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / sd::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_rint<X,X>(d1);
}
};
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_softplus<X, X>(d1);
}
};
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
template <typename X>
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as a runtime variable to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = sd::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(sd::math::nd4j_abs<X>(dis)) + sd::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * sd::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + sd::math::nd4j_abs<X>(dis) + sd::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * sd::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + sd::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * sd::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_tanh<X, X>(d1);
}
};
template <typename X>
class ScaledTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return params[0] * sd::math::nd4j_tanh<X, X>(params[1] * d1);
}
};
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_max<X>(static_cast<X>(0), sd::math::nd4j_tanh<X,X>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? sd::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_atanh<X,X>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_tanhderivative<X,X>(d1);
}
};
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_acos<X, X>(d1);
}
};
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_asinh<X, X>(d1);
}
};
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (sd::math::nd4j_sqrt<X, X>(sd::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_acosh<X, X>(d1);
}
};
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (sd::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * sd::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_softsign<X, X>(d1);
}
};
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_softsignderivative<X,X>(d1);
}
};
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return sd::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
case 1: // not equals
return sd::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
case 2: // less_than
return d1 < compare ? true : false;
case 3: // greater_than
return d1 > compare ? true : false;
case 4: // less_or_equals_than
return d1 <= compare ? true : false;
case 5: // greater_or_equals_than
return d1 >= compare ? true : false;
case 6: // abs_less_than
return sd::math::nd4j_abs<X>(d1) < compare ? true : false;
case 7: // abs_greater_than
return sd::math::nd4j_abs<X>(d1) > compare ? true : false;
case 8: // is inf
return sd::math::nd4j_isinf(d1) ? true : false;
case 9: // is nan
return sd::math::nd4j_isnan(d1) ? true : false;
case 10:
return (d1 == compare) ? true : false;
case 11:
return (d1 != compare) ? true : false;
case 12: // abs_greater_or_equals_than
return sd::math::nd4j_abs<X>(d1) >= compare ? true : false;
case 13: // abs_less_or_equals_than
return sd::math::nd4j_abs<X>(d1) <= compare ? true : false;
case 14:
// isFinite
return !(sd::math::nd4j_isinf(d1) || sd::math::nd4j_isnan(d1));
case 15:
// isInfinite
return sd::math::nd4j_isinf(d1) || sd::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
};
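// Hedged usage sketch (added for illustration, not part of the original header):
// MatchConditionBool reads extraParams as { compare, eps, mode }; mode 2 selects
// "less_than" in the switch above. The helper name is hypothetical and EPS is the
// 1e-5 tolerance defined at the top of this file.
static inline bool matchLessThanExample(float value, float threshold) {
    float extraParams[3] = { threshold, static_cast<float>(EPS), 2.0f };
    return simdOps::MatchConditionBool<float, bool>::op(value, extraParams);
}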
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, X compare, X eps, int mode) {
switch (mode) {
case 0: // equals
return sd::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return sd::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return sd::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return sd::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return sd::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return sd::math::nd4j_isnan(d1) ? 1 : 0;
case 10:
return (d1 == compare) ? 1 : 0;
case 11:
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return sd::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return sd::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(sd::math::nd4j_isinf(d1) || sd::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return sd::math::nd4j_isinf(d1) || sd::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X compare, X *extraParams) {
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[0]);
return op(d1, compare, eps, mode);
}
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
return op(d1, compare, eps, mode);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
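// Illustrative sketch, not part of the original header: counting how many entries of a
// host buffer equal a target value with MatchCondition<float, float>, assuming X = Z = float.
// The extraParams layout follows the op(X d1, X *extraParams) overload above: {compare, eps, mode}.
//
//   float extras[3] = { target, eps, 0.0f };      // mode 0: equals within eps
//   float acc = MatchCondition<float, float>::startingValue(x);
//   for (int i = 0; i < n; i++)
//       acc = MatchCondition<float, float>::update(acc, MatchCondition<float, float>::op(x[i], extras), extras);
//   // postProcess() returns the accumulated match count unchanged.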
template <typename X, typename Y, typename Z>
class ELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_elu<X,Z>(d1, static_cast<X>(d2));
}
};
template <typename X, typename Y, typename Z>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_eluderivative<X,Z>(d1, static_cast<X>(d2));
}
};
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
template <typename X, typename Y, typename Z>
class RELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt > xf ? static_cast<Z>(1.f) : static_cast<Z>(0.f);
}
};
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
}
};
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto val = static_cast<Z>(d1);
auto alpha = static_cast<Z>(d2);
return val < 0.0f ? alpha * val : val;
}
};
template <typename X>
class SELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * sd::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
}
};
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * sd::math::nd4j_exp<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
template <typename X>
class ASin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_asin<X,X>(d1);
}
};
template <typename X>
class Sinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_sinh<X,X>(d1);
}
};
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_cosh<X, X>(d1);
}
};
template <typename X>
class Cosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_cosh<X,X>(d1);
}
};
template <typename X>
class Tan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_tan<X,X>(d1);
}
};
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / sd::math::nd4j_pow<X, X, X>(sd::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
template <typename X>
class ATan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return sd::math::nd4j_atan<X, X>(d1);
}
};
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X k = params[0];
if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
return static_cast<X>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
return static_cast<X>(MIN_CUTFOFF) / k;
return d1;
}
};
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ReduceSameBenchmarkOp {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<X>(sd::math::nd4j_pow<float,float,float>(f1, 3)
+ sd::math::nd4j_log<float,float>(f1) * sd::math::nd4j_sin<float,float>(f1)
/ sd::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* sd::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- sd::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto p = d1 * d1;
return static_cast<Z>(p) * sd::math::nd4j_log<X, Z>(p);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * sd::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return sd::math::nd4j_log<Z, Z>(-reduction);
}
};
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * sd::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_abs<X>(opOutput) + sd::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_abs<X>(opOutput) + sd::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return sd::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return sd::math::nd4j_abs<X>(reduction);
}
};
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
}
};
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction / (Z) n;
}
};
template <typename X, typename Z>
class ReduceFloatBenchmarkOp {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<Z>(sd::math::nd4j_pow<float,float,float>(f1, 3)
+ sd::math::nd4j_log<float,float>(f1) * sd::math::nd4j_sin<float,float>(f1)
/ sd::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* sd::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- sd::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return (Z) reduction / (Z) n;
}
};
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return sd::math::nd4j_abs<Z>(opOutput) + sd::math::nd4j_abs<Z>(old);
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return sd::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return sd::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
}
};
template <typename X>
class Max {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::MAX;
op_def static X startingValue(const X *input) {
return -sd::DataTypeUtils::infOrMax<X>();
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_max<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_max<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return sd::math::nd4j_max<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return sd::math::nd4j_max<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (sd::math::nd4j_abs<Z>(z1) > sd::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (sd::math::nd4j_abs<Z>(z1) < sd::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return sd::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X>
class AMax {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::AMAX;
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_max<X>(sd::math::nd4j_abs<X>(old), sd::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_max<X>(sd::math::nd4j_abs<X>(opOutput), sd::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return sd::math::nd4j_max<X>(sd::math::nd4j_abs<X>(d1), sd::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return sd::math::nd4j_abs<X>(d1) > sd::math::nd4j_abs<X>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return sd::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return sd::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class AMin {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::AMIN;
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_min<X>(sd::math::nd4j_abs<X>(old), sd::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_min<X>(sd::math::nd4j_abs<X>(opOutput), sd::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return sd::math::nd4j_min<X>(sd::math::nd4j_abs<X>(d1), sd::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return sd::math::nd4j_min<X>(sd::math::nd4j_abs<X>(d1), sd::math::nd4j_abs<X>(d2));
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return sd::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return sd::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class Min {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::MIN;
op_def static X startingValue(const X *input) {
return sd::DataTypeUtils::infOrMax<X>();
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_min<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return sd::math::nd4j_min<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return sd::math::nd4j_min<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return sd::math::nd4j_min<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(sd::math::nd4j_abs<X>(d1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return sd::math::nd4j_sqrt<Z, Z>(reduction);
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
};
template <typename X, typename Z>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
X v = sd::math::nd4j_abs<X>(d1);
return static_cast<Z>(v * v);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return sd::math::nd4j_sqrt<Z, Z>(reduction);
}
};
template <typename X, typename Z>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return sd::math::nd4j_pow<X, Z, Z>(sd::math::nd4j_abs<X>(d1), extraParams[0]);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return sd::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
}
};
template <typename X, typename Z>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return sd::math::nd4j_max<Z>(sd::math::nd4j_abs<Z>(old),
sd::math::nd4j_abs<Z>(opOutput));
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return sd::math::nd4j_abs<Z>(reduction);
}
};
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (sd::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, Z *extraParams) {
X mean = extraParams[0];
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
Z sqrtRet = sd::math::nd4j_sqrt<X, Z>(ret);
return sqrtRet;
}
};
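// Note (added for clarity, not part of the original header): extraParams[0] is expected to
// hold the precomputed mean, so the reduction accumulates sum_i (x_i - mean)^2 and
// postProcess() returns the bias-corrected sample standard deviation
//
//   stddev = sqrt( sum_i (x_i - mean)^2 / (n - 1) )
//
// reusing Variance<X, Z>::postProcess(), which performs the division by (n - 1).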
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (sd::math::nd4j_sqrt<Y, Y>(extraParams[0]) * sd::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
sd::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
sd::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
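// Note (added for clarity, not part of the original header): extraParams[0] and
// extraParams[1] accumulate the squared norms of the two inputs while the reduction
// accumulates their dot product, so postProcess() evaluates
//
//   cosine(x, y) = sum_i x_i * y_i / ( sqrt(sum_i x_i^2) * sqrt(sum_i y_i^2) )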
template <typename X, typename Y>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
// num / denom
return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
}
op_def static Y num(X d1, X d2) {
return sd::math::nd4j_min<X>(d1, d2);
}
op_def static Y denom(X d1, X d2) {
return sd::math::nd4j_max<X>(d1, d2);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(num(d1, d2));
extraParams[1] += static_cast<Y>(denom(d1, d2));
return static_cast<Y>(0.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
sd::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
sd::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<Y>(0.0f);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
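// Note (added for clarity, not part of the original header): the accumulators hold
// sum_i min(x_i, y_i) and sum_i max(x_i, y_i), so postProcess() returns the weighted
// Jaccard distance
//
//   d(x, y) = 1 - sum_i min(x_i, y_i) / sum_i max(x_i, y_i)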
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return (static_cast<Y>(1.0f)) - (reduction / (sd::math::nd4j_sqrt<Y, Y>(extraParams[0]) * sd::math::nd4j_sqrt<Y, Y>(extraParams[1])));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(sd::math::nd4j_abs<X>(d1) * sd::math::nd4j_abs<X>(d1));
extraParams[1] += static_cast<Y>(sd::math::nd4j_abs<X>(d2) * sd::math::nd4j_abs<X>(d2));
return (d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
sd::math::atomics::nd4j_atomicAdd(&extraParams[0], sd::math::nd4j_abs<Y>(d1) * sd::math::nd4j_abs<Y>(d1));
sd::math::atomics::nd4j_atomicAdd(&extraParams[1], sd::math::nd4j_abs<Y>(d2) * sd::math::nd4j_abs<Y>(d2));
return (d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
*/
template <typename X, typename Y>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(d1 * d2);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
double eps = sd::math::nd4j_abs<double>(extraParamsRef[2]);
return static_cast<Z>(!sd::math::nd4j_eq<X>(d1, d2, eps));
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
template <typename X, typename Y>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return sd::math::nd4j_sqrt<Y, Y>(reduction);
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
X ret = d1 - d2;
return static_cast<Y>(ret * ret);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return sd::math::nd4j_abs<X>(d1 - d2);
}
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static Y merge(X old, X opOutput, X *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
template <typename X, typename Z>
class IndexAbsoluteMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return sd::math::nd4j_abs<X>(val);
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = sd::math::nd4j_abs<X>(opOutput.value);
old.value = sd::math::nd4j_abs<X>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (sd::math::nd4j_abs<X>(f1.value) > sd::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(const X *input) {
return 0;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(const X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class FirstIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -sd::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(const X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X, typename Z>
class LastIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -sd::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(const X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X, typename Z>
class IndexMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(const X *input) {
return -sd::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(const X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class IndexAbsoluteMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(const X *input) {
return sd::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(const X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = sd::math::nd4j_abs<X>(opOutput.value);
old.value = sd::math::nd4j_abs<X>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (sd::math::nd4j_abs<X>(f1.value) < sd::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class IndexMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(const X *input) {
return sd::DataTypeUtils::infOrMax<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(const X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
Z ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return static_cast<Z>(val.variance());
return ret;
}
return static_cast<Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
auto ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return sd::math::nd4j_sqrt<double, Z>(val.variance());
else
return sd::math::nd4j_sqrt<double, Z>(ret);
}
return sd::math::nd4j_sqrt<double, Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X>
class DropOut {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
inline _CUDA_D static X op(X d1, X *params) {
X prob = params[0];
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x;
X rnd = sd::math::nd4j_abs<X>(sd::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX); // divide in floating point; integer rand() / RAND_MAX truncates to 0
#endif
return rnd >= prob ? static_cast<X>(0.0f) : d1;
}
};
template <typename X, typename Y, typename Z>
class DropOutInverted {
public:
no_op_exec_special
no_op_exec_special_cuda
#ifdef __CUDACC__
__device__
#endif
inline static Z op(X d1, Y d2, Z *params) {
Y prob = d2;
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x;
X rnd = sd::math::nd4j_abs<X>(sd::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX); // divide in floating point; integer rand() / RAND_MAX truncates to 0
#endif
return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob));
}
};
template <typename X, typename Y, typename Z>
class ReplaceNans {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return sd::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ;
}
};
// this op is used for conditional pairwise transforms only
template <typename X, typename Y, typename Z>
class CompareAndReplace{
public:
// op definition for PairWise Transform
op_def static Z op(X d1, Y d2, Z *params) {
auto zd1 = static_cast<Z>(d1);
auto zd2 = static_cast<Z>(d2);
auto compare = params[0];
auto eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (sd::math::nd4j_abs<Z>(zd1 - compare) <= eps)
return zd2;
else
return zd1;
else if (mode == 1) // not equals eps
if (sd::math::nd4j_abs<Z>(zd1 - compare) > eps)
return zd2;
else
return zd1;
else if (mode == 2) // less_than eps
if (zd1 < compare)
return zd2;
else
return zd1;
else if (mode ==3) // greater_than
if (zd1 > compare)
return zd2;
else
return zd1;
else if (mode == 4) // less_or_equals_than
if (zd1 <= compare)
return zd2;
else
return zd1;
else if (mode == 5) // greater_or_equals_than
if (zd1 >= compare)
return zd2;
else
return zd1;
else if (mode == 6) // abs_less_than
if (sd::math::nd4j_abs<Z>(zd1) < compare)
return zd2;
else
return zd1;
else if (mode == 7) // abs_greater_than
if (sd::math::nd4j_abs<Z>(zd1) > compare)
return zd2;
else
return zd1;
else if (mode == 8) // is inf
if (sd::math::nd4j_isinf(zd1))
return zd2;
else
return zd1;
else if (mode == 9) // is nan
if (sd::math::nd4j_isnan(zd1))
return zd2;
else
return zd1;
else if (mode == 10)
if (zd1 == compare)
return zd2;
else
return zd1;
else if (mode == 11)
if (zd1 != compare)
return zd2;
else
return zd1;
else if (mode == 12) // abs_greater_or_equals_than
if (sd::math::nd4j_abs<Z>(zd1) >= compare)
return zd2;
else
return zd1;
else if (mode == 13) // abs_less_or_equals_than
if (sd::math::nd4j_abs<Z>(zd1) <= compare)
return zd2;
else
return zd1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return zd1;
}
};
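// Illustrative sketch, not part of the original header: params follow the layout read
// above, {compare, <unused here>, eps, mode}. With mode 2 (less_than) every element of
// the first input that is below `compare` is replaced by the corresponding element of
// the second input, assuming X = Y = Z = float:
//
//   float params[4] = { 0.0f, 0.0f, 1e-6f, 2.0f };  // replace entries of d1 that are < 0 with d2
//   float z = CompareAndReplace<float, float, float>::op(d1, d2, params);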
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
// op definition for PairWise Transform
op_def static Z op(X dX, Y dY, Z *params) {
auto d1 = static_cast<Z>(dX);
auto d2 = static_cast<Z>(dY);
auto compare = params[0];
auto eps = params[2];
auto mode = static_cast<int>(params[3]);
if (mode == 0) // equals
if (sd::math::nd4j_abs<Z>(d2 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals
if (sd::math::nd4j_abs<Z>(d2 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than
if (d2 < compare)
return d2;
else
return d1;
else if (mode ==3) // greater_than
if (d2 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d2 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d2 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (sd::math::nd4j_abs<Z>(d2) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (sd::math::nd4j_abs<Z>(d2) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (sd::math::nd4j_isinf(d2))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (sd::math::nd4j_isnan(d2))
return d2;
else
return d1;
else if (mode == 10)
if (d2 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d2 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (sd::math::nd4j_abs<Z>(d1) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (sd::math::nd4j_abs<Z>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
template <typename X>
class CompareAndSetTransform {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op definition for Transform
op_def static X op(X d1, X *params) {
auto compare = params[0];
auto set = params[1];
auto eps = params[2];
// with mode == 0 the value is set when d1 equals compare (within eps); mode == 1 sets it when d1 differs; the remaining modes follow the comparisons below
int mode = (int) params[3];
if (mode == 0) // equals
if (sd::math::nd4j_abs<X>(d1 - compare) <= eps)
return set;
else
return d1;
//return sd::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
else if (mode == 1) // not equals
if (sd::math::nd4j_abs<X>(d1 - compare) > eps)
return set;
else
return d1;
//return sd::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
else if (mode == 2) // less_than
if (d1 < compare)
return set;
else
return d1;
else if (mode ==3) // greater_than
if (d1 > compare)
return set;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return set;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return set;
else
return d1;
else if (mode == 6) // abs_less_than
if (sd::math::nd4j_abs<X>(d1) < compare)
return set;
else
return d1;
else if (mode == 7) // abs_greater_than
if (sd::math::nd4j_abs<X>(d1) > compare)
return set;
else
return d1;
else if (mode == 8) // is inf
if (sd::math::nd4j_isinf(d1))
return set;
else
return d1;
else if (mode == 9) // is nan
if (sd::math::nd4j_isnan(d1))
return set;
else
return d1;
else if (mode == 10)
if (d1 == compare)
return set;
else
return d1;
else if (mode == 11)
if (d1 != compare)
return set;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (sd::math::nd4j_abs<X>(d1) >= compare)
return set;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (sd::math::nd4j_abs<X>(d1) <= compare)
return set;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
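// Illustrative sketch, not part of the original header: params follow the layout read
// above, {compare, set, eps, mode}. For example, replacing NaNs with zero uses mode 9
// (is nan), assuming X = float; compare and eps are ignored for that mode:
//
//   float params[4] = { 0.0f, 0.0f, 0.0f, 9.0f };
//   float z = CompareAndSetTransform<float>::op(d1, params);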
}
#endif
// dataCostD.h
void interp3xyz(float *datai, float *data, float *datax, float *datay, int len1, int len2)
{
// x-interp
for (int k = 0; k < len1; k++)
{
for (int j = 0; j < len2; j++)
{
int j2 = (j + 1) / 2;
if (j % 2 == 1)
{
for (int i = 0; i < len1; i++)
{
datax[i + j * len1 + k * len1 * len2] = data[i + j2 * len1 + k * len1 * len1];
}
}
else
for (int i = 0; i < len1; i++)
{
datax[i + j * len1 + k * len1 * len2] = 0.5 * (data[i + j2 * len1 + k * len1 * len1] + data[i + (j2 + 1) * len1 + k * len1 * len1]);
}
}
}
// y-interp
for (int k = 0; k < len1; k++)
{
for (int j = 0; j < len2; j++)
{
for (int i = 0; i < len2; i++)
{
int i2 = (i + 1) / 2;
if (i % 2 == 1)
datay[i + j * len2 + k * len2 * len2] = datax[i2 + j * len1 + k * len1 * len2];
else
datay[i + j * len2 + k * len2 * len2] = 0.5 * (datax[i2 + j * len1 + k * len1 * len2] + datax[i2 + 1 + j * len1 + k * len1 * len2]);
}
}
}
// z-interp
for (int k = 0; k < len2; k++)
{
int k2 = (k + 1) / 2;
if (k % 2 == 1)
{
for (int j = 0; j < len2; j++)
{
for (int i = 0; i < len2; i++)
{
datai[i + j * len2 + k * len2 * len2] = datay[i + j * len2 + k2 * len2 * len2];
}
}
}
else
{
for (int j = 0; j < len2; j++)
{
for (int i = 0; i < len2; i++)
{
datai[i + j * len2 + k * len2 * len2] = 0.5 * (datay[i + j * len2 + k2 * len2 * len2] + datay[i + j * len2 + (k2 + 1) * len2 * len2]);
}
}
}
}
}
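// Note (added for clarity, not part of the original source): interp3xyz appears to
// upsample a len1 x len1 x len1 volume to len2 x len2 x len2 (the caller is assumed to
// pass len2 == 2 * len1), sweeping the x, y and z axes in turn and alternating between
// copying a coarse sample and averaging two adjacent coarse samples. interp3xyzB below
// is the same scheme with the opposite even/odd phase.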
void interp3xyzB(float *datai, float *data, float *datax, float *datay, int len1, int len2)
{
// x-interp
for (int k = 0; k < len1; k++)
{
for (int j = 0; j < len2; j++)
{
int j2 = (j + 1) / 2;
if (j % 2 == 0)
{
for (int i = 0; i < len1; i++)
{
datax[i + j * len1 + k * len1 * len2] = data[i + j2 * len1 + k * len1 * len1];
}
}
else
for (int i = 0; i < len1; i++)
{
datax[i + j * len1 + k * len1 * len2] = 0.5 * (data[i + j2 * len1 + k * len1 * len1] + data[i + (j2 - 1) * len1 + k * len1 * len1]);
}
}
}
// y-interp
for (int k = 0; k < len1; k++)
{
for (int j = 0; j < len2; j++)
{
for (int i = 0; i < len2; i++)
{
int i2 = (i + 1) / 2;
if (i % 2 == 0)
datay[i + j * len2 + k * len2 * len2] = datax[i2 + j * len1 + k * len1 * len2];
else
datay[i + j * len2 + k * len2 * len2] = 0.5 * (datax[i2 + j * len1 + k * len1 * len2] + datax[i2 - 1 + j * len1 + k * len1 * len2]);
}
}
}
// z-interp
for (int k = 0; k < len2; k++)
{
int k2 = (k + 1) / 2;
if (k % 2 == 0)
{
for (int j = 0; j < len2; j++)
{
for (int i = 0; i < len2; i++)
{
datai[i + j * len2 + k * len2 * len2] = datay[i + j * len2 + k2 * len2 * len2];
}
}
}
else
{
for (int j = 0; j < len2; j++)
{
for (int i = 0; i < len2; i++)
{
datai[i + j * len2 + k * len2 * len2] = 0.5 * (datay[i + j * len2 + k2 * len2 * len2] + datay[i + j * len2 + (k2 - 1) * len2 * len2]);
}
}
}
}
}
void dataCostCL(uint64_t *data, uint64_t *data2, float *results, int m, int n, int o, int len2, int step1, int hw, float quant, float alpha, int randnum)
{
cout << "d" << flush;
int len = hw * 2 + 1;
len2 = len * len * len;
int sz = m * n * o;
int m1 = m / step1;
int n1 = n / step1;
int o1 = o / step1;
int sz1 = m1 * n1 * o1;
// cout<<"len2: "<<len2<<" sz1= "<<sz1<<"\n";
int quant2 = quant;
// const int hw2=hw*quant2; == pad1
int pad1 = quant2 * hw;
int pad2 = pad1 * 2;
int mp = m + pad2;
int np = n + pad2;
int op = o + pad2;
int szp = mp * np * op;
uint64_t *data2p = new uint64_t[szp];
for (int k = 0; k < op; k++)
{
for (int j = 0; j < np; j++)
{
for (int i = 0; i < mp; i++)
{
data2p[i + j * mp + k * mp * np] = data2[max(min(i - pad1, m - 1), 0) + max(min(j - pad1, n - 1), 0) * m + max(min(k - pad1, o - 1), 0) * m * n];
}
}
}
int skipz = 1;
int skipx = 1;
int skipy = 1;
if (step1 > 4)
{
if (randnum > 0)
{
skipz = 2;
skipx = 2;
}
if (randnum > 1)
{
skipy = 2;
}
}
if (randnum > 1 && step1 > 7)
{
skipz = 3;
skipx = 3;
skipy = 3;
}
if (step1 == 4 && randnum > 1)
skipz = 2;
float maxsamp = ceil((float)step1 / (float)skipx) * ceil((float)step1 / (float)skipz) * ceil((float)step1 / (float)skipy);
// printf("randnum: %d, maxsamp: %d ",randnum,(int)maxsamp);
float alphai = (float)step1 / (alpha * (float)quant);
float alpha1 = 0.5 * alphai / (float)(maxsamp);
// uint64_t buffer[1000];
#pragma omp parallel for
for (int z = 0; z < o1; z++)
{
for (int x = 0; x < n1; x++)
{
for (int y = 0; y < m1; y++)
{
int z1 = z * step1;
int x1 = x * step1;
int y1 = y * step1;
/*for(int k=0;k<step1;k++){
for(int j=0;j<step1;j++){
for(int i=0;i<step1;i++){
buffer[i+j*step1+k*step1*step1]=data[i+y1+(j+x1)*m+(k+z1)*m*n];
}
}
}*/
for (int l = 0; l < len2; l++)
{
int out1 = 0;
int zs = l / (len * len);
int xs = (l - zs * len * len) / len;
int ys = l - zs * len * len - xs * len;
zs *= quant;
xs *= quant;
ys *= quant;
int x2 = xs + x1;
int z2 = zs + z1;
int y2 = ys + y1;
for (int k = 0; k < step1; k += skipz)
{
for (int j = 0; j < step1; j += skipx)
{
for (int i = 0; i < step1; i += skipy)
{
// unsigned int t=buffer[i+j*STEP+k*STEP*STEP]^buf2p[i+j*mp+k*mp*np];
// out1+=(wordbits[t&0xFFFF]+wordbits[t>>16]);
uint64_t t1 = data[i + y1 + (j + x1) * m + (k + z1) * m * n]; // buffer[i+j*step1+k*step1*step1];
uint64_t t2 = data2p[i + j * mp + k * mp * np + (y2 + x2 * mp + z2 * mp * np)];
out1 += bitset<64>(t1 ^ t2).count();
}
}
}
results[(y + x * m1 + z * m1 * n1) * len2 + l] = out1 * alpha1;
}
}
}
}
delete[] data2p;
return;
}
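// Added sketch (not part of the original source): the inner loop of dataCostCL above scores a
// candidate displacement by the Hamming distance popcount(t1 ^ t2) between two 64-bit packed
// descriptors, computed via bitset<64>(...).count(). An equivalent helper using a compiler
// builtin could look like this (the name hammingDistance64 and the GCC/Clang builtin are
// assumptions; MSVC would use __popcnt64 from <intrin.h> instead):
static inline int hammingDistance64(uint64_t t1, uint64_t t2)
{
    // same value as bitset<64>(t1 ^ t2).count(); typically compiles to a single POPCNT instruction
    return __builtin_popcountll(t1 ^ t2);
}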
void warpImageCL(float *warped, float *im1, float *im1b, float *u1, float *v1, float *w1)
{
int m = image_m;
int n = image_n;
int o = image_o;
int sz = m * n * o;
float ssd = 0;
float ssd0 = 0;
float ssd2 = 0;
interp3(warped, im1, u1, v1, w1, m, n, o, m, n, o, true);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
for (int k = 0; k < o; k++)
{
ssd += pow(im1b[i + j * m + k * m * n] - warped[i + j * m + k * m * n], 2);
ssd0 += pow(im1b[i + j * m + k * m * n] - im1[i + j * m + k * m * n], 2);
}
}
}
ssd /= m * n * o;
ssd0 /= m * n * o;
SSD0 = ssd0;
SSD1 = ssd;
}
void warpAffineS(short *warped, short *input, float *X, float *u1, float *v1, float *w1)
{
int m = image_m;
int n = image_n;
int o = image_o;
int sz = m * n * o;
for (int k = 0; k < o; k++)
{
for (int j = 0; j < n; j++)
{
for (int i = 0; i < m; i++)
{
float y1 = (float)i * X[0] + (float)j * X[1] + (float)k * X[2] + (float)X[3] + v1[i + j * m + k * m * n];
float x1 = (float)i * X[4] + (float)j * X[5] + (float)k * X[6] + (float)X[7] + u1[i + j * m + k * m * n];
float z1 = (float)i * X[8] + (float)j * X[9] + (float)k * X[10] + (float)X[11] + w1[i + j * m + k * m * n];
int x = round(x1);
int y = round(y1);
int z = round(z1);
// if(y>=0&x>=0&z>=0&y<m&x<n&z<o){
warped[i + j * m + k * m * n] = input[min(max(y, 0), m - 1) + min(max(x, 0), n - 1) * m + min(max(z, 0), o - 1) * m * n];
//}
// else{
// warped[i+j*m+k*m*n]=0;
//}
}
}
}
}
void warpAffine(float *warped, float *input, float *im1b, float *X, float *u1, float *v1, float *w1)
{
int m = image_m;
int n = image_n;
int o = image_o;
int sz = m * n * o;
float ssd = 0;
float ssd0 = 0;
float ssd2 = 0;
for (int k = 0; k < o; k++)
{
for (int j = 0; j < n; j++)
{
for (int i = 0; i < m; i++)
{
float y1 = (float)i * X[0] + (float)j * X[1] + (float)k * X[2] + (float)X[3] + v1[i + j * m + k * m * n];
float x1 = (float)i * X[4] + (float)j * X[5] + (float)k * X[6] + (float)X[7] + u1[i + j * m + k * m * n];
float z1 = (float)i * X[8] + (float)j * X[9] + (float)k * X[10] + (float)X[11] + w1[i + j * m + k * m * n];
int x = floor(x1);
int y = floor(y1);
int z = floor(z1);
float dx = x1 - x;
float dy = y1 - y;
float dz = z1 - z;
warped[i + j * m + k * m * n] = (1.0 - dx) * (1.0 - dy) * (1.0 - dz) * input[min(max(y, 0), m - 1) + min(max(x, 0), n - 1) * m + min(max(z, 0), o - 1) * m * n] +
(1.0 - dx) * dy * (1.0 - dz) * input[min(max(y + 1, 0), m - 1) + min(max(x, 0), n - 1) * m + min(max(z, 0), o - 1) * m * n] +
dx * (1.0 - dy) * (1.0 - dz) * input[min(max(y, 0), m - 1) + min(max(x + 1, 0), n - 1) * m + min(max(z, 0), o - 1) * m * n] +
(1.0 - dx) * (1.0 - dy) * dz * input[min(max(y, 0), m - 1) + min(max(x, 0), n - 1) * m + min(max(z + 1, 0), o - 1) * m * n] +
dx * dy * (1.0 - dz) * input[min(max(y + 1, 0), m - 1) + min(max(x + 1, 0), n - 1) * m + min(max(z, 0), o - 1) * m * n] +
(1.0 - dx) * dy * dz * input[min(max(y + 1, 0), m - 1) + min(max(x, 0), n - 1) * m + min(max(z + 1, 0), o - 1) * m * n] +
dx * (1.0 - dy) * dz * input[min(max(y, 0), m - 1) + min(max(x + 1, 0), n - 1) * m + min(max(z + 1, 0), o - 1) * m * n] +
dx * dy * dz * input[min(max(y + 1, 0), m - 1) + min(max(x + 1, 0), n - 1) * m + min(max(z + 1, 0), o - 1) * m * n];
}
}
}
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
for (int k = 0; k < o; k++)
{
ssd += pow(im1b[i + j * m + k * m * n] - warped[i + j * m + k * m * n], 2);
ssd0 += pow(im1b[i + j * m + k * m * n] - input[i + j * m + k * m * n], 2);
}
}
}
ssd /= m * n * o;
ssd0 /= m * n * o;
SSD0 = ssd0;
SSD1 = ssd;
}
|
SeparableFilter.h
|
/*
Copyright 2007 Niels Martin Hansen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Contact:
E-mail: <[email protected]>
IRC: jfs in #aegisub on irc.rizon.net
*/
#pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math.h>
// Filter an image in horizontal direction with a one-dimensional filter
// PixelDist is the distance in bytes between pixels
template<ptrdiff_t PixelDist>
void SeparableFilterX(unsigned char *src, unsigned char *dst, int width, int height, ptrdiff_t stride, int *kernel, int kernel_size, int divisor)
{
#pragma omp parallel for
for (int y = 0; y < height; y++) {
unsigned char *in = src + y*stride;
unsigned char *out = dst + y*stride;
for (int x = 0; x < width; x++) {
int accum = 0;
for (int k = 0; k < kernel_size; k++) {
int xofs = k - kernel_size/2;
if (x+xofs < 0) xofs += width;
if (x+xofs >= width) xofs -= width;
accum += (int)(in[xofs*PixelDist] * kernel[k]);
}
accum /= divisor;
if (accum > 255) accum = 255;
if (accum < 0) accum = 0;
*out = (unsigned char)accum;
in+=PixelDist;
out+=PixelDist;
}
}
}
// Filter an image in vertical direction with a one-dimensional filter
// This one is templated with PixelDist since the channel interlacing is horizontal only;
// filtering once vertically will automatically catch all channels.
// (Width must be multiplied by the pixel distance for that to happen, though.)
template<ptrdiff_t PixelDist>
void SeparableFilterY(unsigned char *src, unsigned char *dst, int width, int height, ptrdiff_t stride, int *kernel, int kernel_size, int divisor)
{
width *= PixelDist;
#pragma omp parallel for
for (int x = 0; x < width; x+=PixelDist) {
unsigned char *in = src + x;
unsigned char *out = dst + x;
for (int y = 0; y < height; y++) {
int accum = 0;
for (int k = 0; k < kernel_size; k++) {
int yofs = k - kernel_size/2;
if (y+yofs < 0) yofs += height;
if (y+yofs >= height) yofs -= height;
accum += (int)(in[yofs*stride] * kernel[k]);
}
accum /= divisor;
if (accum > 255) accum = 255;
if (accum < 0) accum = 0;
*out = (unsigned char)accum;
in += stride;
out += stride;
}
}
}
static inline double NormalDist(double sigma, double x)
{
if (sigma <= 0 && x == 0) return 1;
else if (sigma <= 0) return 0;
else return exp(-(x*x)/(2*sigma*sigma)) / (sigma * sqrt(2*3.1415926535));
}
struct GaussianKernel {
int *kernel;
int width;
int divisor;
inline GaussianKernel(double sigma)
{
width = (int)(sigma*3 + 0.5) | 1; // binary-or with 1 to make sure the number is odd
if (width < 3) width = 3;
kernel = new int[width];
kernel[width/2] = (int)(NormalDist(sigma, 0) * 255);
divisor = kernel[width/2];
for (int x = width/2-1; x >= 0; x--) {
int val = (int)(NormalDist(sigma, width/2-x) * 255 + 0.5);
divisor += val*2;
kernel[x] = val;
kernel[width - x - 1] = val;
}
}
inline ~GaussianKernel()
{
delete[] kernel;
}
};
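// Added usage sketch (assumption, not part of the original header): a Gaussian blur of a tightly
// packed single-channel 8-bit image combines one GaussianKernel with the two separable passes
// above. Here PixelDist = 1 and stride = width; interleaved data (e.g. RGBA) would pass its own
// pixel distance and byte stride instead. The caller provides a scratch buffer of the same size.
inline void GaussianBlurGray8(unsigned char *img, unsigned char *scratch, int width, int height, double sigma)
{
	GaussianKernel kernel(sigma);
	SeparableFilterX<1>(img, scratch, width, height, width, kernel.kernel, kernel.width, kernel.divisor);
	SeparableFilterY<1>(scratch, img, width, height, width, kernel.kernel, kernel.width, kernel.divisor);
}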
|
t006.c
|
#include<stdint.h>
#include<inttypes.h>
#include<stdlib.h>
#include<stdio.h>
#include<omp.h>
typedef struct {int64_t nteam; int64_t mthread;} tinfo;
int
main(int argc, char **argv)
{
const int h = omp_get_initial_device();
const int d = omp_get_default_device();
const size_t s = sizeof(tinfo);
tinfo *t = malloc(s);
if(!t){
perror("malloc error");
exit(1);
}
t->nteam = -1;
t->mthread = -1;
tinfo *p = omp_target_alloc(s, d);
if(!p){
perror("omp_target_alloc error");
exit(1);
}
omp_target_memcpy(p, t, s, 0, 0, d, h);
#pragma omp target teams is_device_ptr(p)
{
if(omp_get_team_num() == 0){
p->nteam = omp_get_num_teams();
p->mthread = omp_get_max_threads();
}
}
omp_target_memcpy(t, p, s, 0, 0, h, d);
printf("nteam: %ld mthread: %ld\n", t->nteam, t->mthread);
int ret = 0;
if(t->nteam <= 0 || t->mthread <= 0) ret = 1;
omp_target_free(p, d);
free(t);
return ret;
}
|
sparseBlocksJacobi.h
|
//
// Created by mbarb on 23/01/2018.
//
#ifndef PARALLELITERATIVE_SPARSEBLOCKSJACOBI_H
#define PARALLELITERATIVE_SPARSEBLOCKSJACOBI_H
#include "Eigen"
#include "utils.h"
#include "sparseParallelJacobi.h"
namespace Iterative {
template <typename Scalar>
class sparseBlocksJacobi : public sparseParallelJacobi<Scalar> {
public:
/**
*
* @param A linear system matrix
* @param b known term vector
* @param iterations max number of iterations
* @param tolerance min error tolerated
* @param workers number of threads
* @param blockSize size of the block
*/
explicit sparseBlocksJacobi(
const Eigen::SparseMatrix<Scalar>& A,
const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b,
const ulonglong iterations,
const Scalar tolerance,
const ulong workers = 0L,
const ulonglong blockSize = 0L) :
sparseParallelJacobi<Scalar>::sparseParallelJacobi(A, b, iterations, tolerance, workers) {
this->blockSize = blockSize;
if (blockSize == 0)
this->blockSize = std::max(ulong(this->A.cols() / std::max(workers, (ulong)1L)), (ulong)1L);
splitter();
}
/**
*
* @return
*/
const Eigen::ColumnVector<Scalar, Eigen::Dynamic> &solve() {
Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldSolution(this->solution);
std::vector<Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>> inverses(blocks.size());
Eigen::Matrix<Scalar,Eigen::Dynamic, Eigen::Dynamic> I(this->blockSize,this->blockSize);
Eigen::SimplicialLDLT<Eigen::SparseMatrix<Scalar>> solver;
I.setIdentity();
// compute the inverses of the blocks and memorize it
#pragma omp parallel for firstprivate(I) private(solver)
for (int i = 0; i < blocks.size()-1; ++i) {
Eigen::SparseMatrix<Scalar> block = this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols,
blocks[i].rows);
solver.compute(block);
inverses[i] = solver.solve(I);
}
{
Eigen::SparseMatrix<Scalar> block = this->A.block(blocks.back().startCol, blocks.back().startRow,
blocks.back().cols,blocks.back().rows);
if(block.cols()!=this->blockSize || block.rows()!=this->blockSize){
I.resize(block.rows(), block.cols());
I.setIdentity();
}
solver.compute(block);
inverses.back() = solver.solve(I);
}
std::vector<int> index;
for (this->iteration=0L; this->iteration < this->iterations; ++this->iteration) {
#pragma omp parallel for firstprivate(oldSolution) schedule(dynamic)
for (int i = 0; i < inverses.size(); ++i) {
// zero the components of the old solution that correspond to this block, so A*oldSolution excludes the block's own contribution
Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock = oldSolution.segment(blocks[i].startCol,
blocks[i].cols);
auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
zeroBlock.setZero();
// the segment of the solution vector that this block's inverse approximates
auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
// approximate the solution using the inverse and the solution at the previous iteration
block = inverses[i] *
(this->b - (this->A * oldSolution)).segment(blocks[i].startCol, blocks[i].cols);
zeroBlock = block;
if ((oldBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) {
#pragma omp critical
index.emplace_back(i);
}
}
if (!index.empty()) {
std::sort(index.rbegin(), index.rend());
for (auto i : index) {
blocks.erase(blocks.begin() + i);
inverses.erase(inverses.begin() + i);
}
if (inverses.empty()) break;
index.clear();
}
std::swap(this->solution, oldSolution);
}
std::cout << this->iteration << std::endl;
return this->solution;
}
protected:
ulonglong blockSize;
std::vector<Index> blocks;
void splitter() {
for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong)this->A.cols() - i),
i, std::min(blockSize, (ulonglong)this->A.rows() - i)));
}
}
private:
};
}
#endif //PARALLELITERATIVE_SPARSEBLOCKSJACOBI_H
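// Added usage sketch (assumptions: Eigen::ColumnVector, ulong and ulonglong are the aliases
// provided by the project's utils.h). A caller assembles a sparse system, picks an iteration
// budget, a tolerance, a worker count and a block size, then calls solve():
//
//   Eigen::SparseMatrix<double> A = /* assembled elsewhere */;
//   Eigen::ColumnVector<double, Eigen::Dynamic> b = /* known term */;
//   Iterative::sparseBlocksJacobi<double> jacobi(A, b, 1000, 1e-10, 4, 256);
//   auto x = jacobi.solve();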
|
shear.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The RotateImage, XShearImage, and YShearImage methods are based on the
% paper "A Fast Algorithm for General Raster Rotatation" by Alan W. Paeth,
% Graphics Interface '86 (Vancouver). RotateImage is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
double
distort[6];
Image
*deskew_image;
/*
Affine transform image.
*/
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(affine_matrix != (AffineMatrix *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
distort[0]=affine_matrix->sx;
distort[1]=affine_matrix->rx;
distort[2]=affine_matrix->ry;
distort[3]=affine_matrix->sy;
distort[4]=affine_matrix->tx;
distort[5]=affine_matrix->ty;
deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
MagickTrue,exception);
return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const MagickRealType x_shear,const MagickRealType y_shear,
% const MagickRealType width,const MagickRealType height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
const MagickRealType x_shear,const MagickRealType y_shear,
const MagickRealType width,const MagickRealType height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
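/*
  Added note (sketch of the method implemented below): the image is thresholded to a packed
  binary bit image, a Radon-like projection is accumulated for both shear directions, and the
  offset with the strongest projection response is taken as the skew estimate. DeskewImage()
  then applies

      degrees = RadiansToDegrees(-atan(skew/(8.0*width)))

  where width is the packed row width in bytes rounded up to a power of two and skew is the
  winning projection offset.
*/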
typedef struct _RadonInfo
{
CacheType
type;
size_t
width,
height;
MagickSizeType
length;
MagickBooleanType
mapped;
char
path[MaxTextExtent];
int
file;
unsigned short
*cells;
} RadonInfo;
static RadonInfo *DestroyRadonInfo(RadonInfo *radon_info)
{
assert(radon_info != (RadonInfo *) NULL);
switch (radon_info->type)
{
case MemoryCache:
{
if (radon_info->mapped == MagickFalse)
radon_info->cells=(unsigned short *) RelinquishMagickMemory(
radon_info->cells);
else
radon_info->cells=(unsigned short *) UnmapBlob(radon_info->cells,
(size_t) radon_info->length);
RelinquishMagickResource(MemoryResource,radon_info->length);
break;
}
case MapCache:
{
radon_info->cells=(unsigned short *) UnmapBlob(radon_info->cells,(size_t)
radon_info->length);
RelinquishMagickResource(MapResource,radon_info->length);
}
/* fall through: the map is file-backed, so also close and remove the temporary file */
case DiskCache:
{
if (radon_info->file != -1)
(void) close(radon_info->file);
(void) RelinquishUniqueFileResource(radon_info->path);
RelinquishMagickResource(DiskResource,radon_info->length);
break;
}
default:
break;
}
return((RadonInfo *) RelinquishMagickMemory(radon_info));
}
static MagickBooleanType ResetRadonCells(RadonInfo *radon_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned short
value;
if (radon_info->type != DiskCache)
{
(void) ResetMagickMemory(radon_info->cells,0,(size_t) radon_info->length);
return(MagickTrue);
}
value=0;
(void) lseek(radon_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) radon_info->height; y++)
{
for (x=0; x < (ssize_t) radon_info->width; x++)
{
count=write(radon_info->file,&value,sizeof(*radon_info->cells));
if (count != (ssize_t) sizeof(*radon_info->cells))
break;
}
if (x < (ssize_t) radon_info->width)
break;
}
return(y < (ssize_t) radon_info->height ? MagickFalse : MagickTrue);
}
static RadonInfo *AcquireRadonInfo(const Image *image,const size_t width,
const size_t height,ExceptionInfo *exception)
{
MagickBooleanType
status;
RadonInfo
*radon_info;
radon_info=(RadonInfo *) AcquireMagickMemory(sizeof(*radon_info));
if (radon_info == (RadonInfo *) NULL)
return((RadonInfo *) NULL);
(void) ResetMagickMemory(radon_info,0,sizeof(*radon_info));
radon_info->width=width;
radon_info->height=height;
radon_info->length=(MagickSizeType) width*height*sizeof(*radon_info->cells);
radon_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,radon_info->length);
if ((status != MagickFalse) &&
(radon_info->length == (MagickSizeType) ((size_t) radon_info->length)))
{
status=AcquireMagickResource(MemoryResource,radon_info->length);
if (status != MagickFalse)
{
radon_info->mapped=MagickFalse;
radon_info->cells=(unsigned short *) AcquireMagickMemory((size_t)
radon_info->length);
if (radon_info->cells == (unsigned short *) NULL)
{
radon_info->mapped=MagickTrue;
radon_info->cells=(unsigned short *) MapBlob(-1,IOMode,0,(size_t)
radon_info->length);
}
if (radon_info->cells == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,radon_info->length);
}
}
radon_info->file=(-1);
if (radon_info->cells == (unsigned short *) NULL)
{
status=AcquireMagickResource(DiskResource,radon_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(DestroyRadonInfo(radon_info));
}
radon_info->type=DiskCache;
(void) AcquireMagickResource(MemoryResource,radon_info->length);
radon_info->file=AcquireUniqueFileResource(radon_info->path);
if (radon_info->file == -1)
return(DestroyRadonInfo(radon_info));
status=AcquireMagickResource(MapResource,radon_info->length);
if (status != MagickFalse)
{
status=ResetRadonCells(radon_info);
if (status != MagickFalse)
{
radon_info->cells=(unsigned short *) MapBlob(radon_info->file,
IOMode,0,(size_t) radon_info->length);
if (radon_info->cells != (unsigned short *) NULL)
radon_info->type=MapCache;
else
RelinquishMagickResource(MapResource,radon_info->length);
}
}
}
return(radon_info);
}
static inline size_t MagickMin(const size_t x,const size_t y)
{
if (x < y)
return(x);
return(y);
}
static inline ssize_t ReadRadonCell(const RadonInfo *radon_info,
const MagickOffsetType offset,const size_t length,unsigned char *buffer)
{
register ssize_t
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PPREAD)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ReadRadonCell)
#endif
{
i=(-1);
if (lseek(radon_info->file,offset,SEEK_SET) >= 0)
{
#endif
count=0;
for (i=0; i < (ssize_t) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PPREAD)
count=read(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pread(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count > 0)
continue;
count=0;
if (errno != EINTR)
{
i=(-1);
break;
}
}
#if !defined(MAGICKCORE_HAVE_PPREAD)
}
}
#endif
return(i);
}
static inline ssize_t WriteRadonCell(const RadonInfo *radon_info,
const MagickOffsetType offset,const size_t length,const unsigned char *buffer)
{
register ssize_t
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_WriteRadonCell)
#endif
{
if (lseek(radon_info->file,offset,SEEK_SET) >= 0)
{
#endif
count=0;
for (i=0; i < (ssize_t) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pwrite(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count > 0)
continue;
count=0;
if (errno != EINTR)
{
i=(-1);
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
}
}
#endif
return(i);
}
static inline unsigned short GetRadonCell(const RadonInfo *radon_info,
const ssize_t x,const ssize_t y)
{
MagickOffsetType
i;
unsigned short
value;
i=(MagickOffsetType) radon_info->height*x+y;
if ((i < 0) ||
((MagickSizeType) (i*sizeof(*radon_info->cells)) >= radon_info->length))
return(0);
if (radon_info->type != DiskCache)
return(radon_info->cells[i]);
value=0;
(void) ReadRadonCell(radon_info,i*sizeof(*radon_info->cells),
sizeof(*radon_info->cells),(unsigned char *) &value);
return(value);
}
static inline MagickBooleanType SetRadonCell(const RadonInfo *radon_info,
const ssize_t x,const ssize_t y,const unsigned short value)
{
MagickOffsetType
i;
ssize_t
count;
i=(MagickOffsetType) radon_info->height*x+y;
if ((i < 0) ||
((MagickSizeType) (i*sizeof(*radon_info->cells)) >= radon_info->length))
return(MagickFalse);
if (radon_info->type != DiskCache)
{
radon_info->cells[i]=value;
return(MagickTrue);
}
count=WriteRadonCell(radon_info,i*sizeof(*radon_info->cells),
sizeof(*radon_info->cells),(const unsigned char *) &value);
if (count != (ssize_t) sizeof(*radon_info->cells))
return(MagickFalse);
return(MagickTrue);
}
static void RadonProjection(RadonInfo *source_cells,
RadonInfo *destination_cells,const ssize_t sign,size_t *projection)
{
RadonInfo
*swap;
register ssize_t
x;
register RadonInfo
*p,
*q;
size_t
step;
p=source_cells;
q=destination_cells;
for (step=1; step < p->width; step*=2)
{
for (x=0; x < (ssize_t) p->width; x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
cell;
for (i=0; i < (ssize_t) step; i++)
{
for (y=0; y < (ssize_t) (p->height-i-1); y++)
{
cell=GetRadonCell(p,x+i,y);
(void) SetRadonCell(q,x+2*i,y,cell+GetRadonCell(p,x+i+(ssize_t)
step,y+i));
(void) SetRadonCell(q,x+2*i+1,y,cell+GetRadonCell(p,x+i+(ssize_t)
step,y+i+1));
}
for ( ; y < (ssize_t) (p->height-i); y++)
{
cell=GetRadonCell(p,x+i,y);
(void) SetRadonCell(q,x+2*i,y,cell+GetRadonCell(p,x+i+(ssize_t) step,
y+i));
(void) SetRadonCell(q,x+2*i+1,y,cell);
}
for ( ; y < (ssize_t) p->height; y++)
{
cell=GetRadonCell(p,x+i,y);
(void) SetRadonCell(q,x+2*i,y,cell);
(void) SetRadonCell(q,x+2*i+1,y,cell);
}
}
}
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (x=0; x < (ssize_t) p->width; x++)
{
register ssize_t
y;
size_t
sum;
sum=0;
for (y=0; y < (ssize_t) (p->height-1); y++)
{
ssize_t
delta;
delta=GetRadonCell(p,x,y)-(ssize_t) GetRadonCell(p,x,y+1);
sum+=delta*delta;
}
projection[p->width+sign*x-1]=sum;
}
}
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
RadonInfo
*destination_cells,
*source_cells;
register ssize_t
i;
size_t
count,
width;
ssize_t
y;
unsigned char
byte;
unsigned short
bits[256];
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_cells=AcquireRadonInfo(image,width,image->rows,exception);
destination_cells=AcquireRadonInfo(image,width,image->rows,exception);
if ((source_cells == (RadonInfo *) NULL) ||
(destination_cells == (RadonInfo *) NULL))
{
if (destination_cells != (RadonInfo *) NULL)
destination_cells=DestroyRadonInfo(destination_cells);
if (source_cells != (RadonInfo *) NULL)
source_cells=DestroyRadonInfo(source_cells);
return(MagickFalse);
}
if (ResetRadonCells(source_cells) == MagickFalse)
{
destination_cells=DestroyRadonInfo(destination_cells);
source_cells=DestroyRadonInfo(source_cells);
return(MagickFalse);
}
for (i=0; i < 256; i++)
{
byte=(unsigned char) i;
for (count=0; byte != 0; byte>>=1)
count+=byte & 0x01;
bits[i]=(unsigned short) count;
}
status=MagickTrue;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(p) < threshold) ||
((MagickRealType) GetPixelGreen(p) < threshold) ||
((MagickRealType) GetPixelBlue(p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
(void) SetRadonCell(source_cells,--i,y,bits[byte]);
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
(void) SetRadonCell(source_cells,--i,y,bits[byte]);
}
}
RadonProjection(source_cells,destination_cells,-1,projection);
(void) ResetRadonCells(source_cells);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(p) < threshold) ||
((MagickRealType) GetPixelGreen(p) < threshold) ||
((MagickRealType) GetPixelBlue(p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
(void) SetRadonCell(source_cells,i++,y,bits[byte]);
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
(void) SetRadonCell(source_cells,i++,y,bits[byte]);
}
}
RadonProjection(source_cells,destination_cells,1,projection);
image_view=DestroyCacheView(image_view);
destination_cells=DestroyRadonInfo(destination_cells);
source_cells=DestroyRadonInfo(source_cells);
return(MagickTrue);
}
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickPixelPacket
background;
MagickRealType
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetMagickPixelPacket(image,&background);
count=0.0;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(p);
background.green+=QuantumScale*GetPixelGreen(p);
background.blue+=QuantumScale*GetPixelBlue(p);
background.opacity+=QuantumScale*GetPixelOpacity(p);
count++;
p++;
}
}
image_view=DestroyCacheView(image_view);
image->background_color.red=ClampToQuantum((MagickRealType) QuantumRange*
background.red/count);
image->background_color.green=ClampToQuantum((MagickRealType) QuantumRange*
background.green/count);
image->background_color.blue=ClampToQuantum((MagickRealType) QuantumRange*
background.blue/count);
image->background_color.opacity=ClampToQuantum((MagickRealType) QuantumRange*
background.opacity/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Deskew angle: %g",degrees);
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (artifact == (const char *) NULL)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
static Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
if (rotations == 0)
return(CloneImage(image,0,0,MagickTrue,exception));
if ((rotations == 1) || (rotations == 3))
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
else
rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
rotate_view=AcquireCacheView(rotate_image);
switch (rotations)
{
case 0:
{
/*
Rotate 0 degrees.
*/
break;
}
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress, status)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict rotate_indexes;
register PixelPacket
*restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
register const PixelPacket
*restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels-=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
register const IndexPacket
*restrict tile_indexes;
tile_indexes=indexes+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes-=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict rotate_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(rotate_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict rotate_indexes;
register PixelPacket
*restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
register const PixelPacket
*restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels+=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
register const IndexPacket
*restrict tile_indexes;
tile_indexes=indexes+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes+=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
}
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
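/*
  Added note (derived from the loop below): despite its name, the 'degrees' argument is used
  directly as the shear factor (RotateImage passes -tan(angle/2)). Each scanline y of the
  sheared region is displaced horizontally by

      displacement = degrees*(y-height/2.0)

  positive displacements shift the row right, negative ones left, and the fractional part of
  the displacement is area-blended against the background color.
*/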
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"
typedef enum
{
LEFT,
RIGHT
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
/*
X shear image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress, status)
#endif
for (y=0; y < (ssize_t) height; y++)
{
MagickPixelPacket
pixel,
source,
destination;
MagickRealType
area,
displacement;
register IndexPacket
*restrict indexes,
*restrict shear_indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
exception);
if (p == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p+=x_offset;
indexes+=x_offset;
displacement=degrees*(MagickRealType) (y-height/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=RIGHT;
else
{
displacement*=(-1.0);
direction=LEFT;
}
step=(ssize_t) floor((double) displacement);
area=(MagickRealType) (displacement-step);
step++;
pixel=background;
GetMagickPixelPacket(image,&source);
GetMagickPixelPacket(image,&destination);
switch (direction)
{
case LEFT:
{
/*
Transfer pixels left-to-right.
*/
if (step > x_offset)
break;
q=p-step;
shear_indexes=indexes-step;
for (i=0; i < (ssize_t) width; i++)
{
if ((x_offset+i) < step)
{
SetMagickPixelPacket(image,++p,++indexes,&pixel);
q++;
shear_indexes++;
continue;
}
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
SetMagickPixelPacket(image,p++,indexes++,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,q++,shear_indexes++);
break;
}
case RIGHT:
{
/*
Transfer pixels right-to-left.
*/
p+=width;
indexes+=width;
q=p+step;
shear_indexes=indexes+step;
for (i=0; i < (ssize_t) width; i++)
{
p--;
indexes--;
q--;
shear_indexes--;
if ((size_t) (x_offset+width+step-i) >= image->columns)
continue;
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q,shear_indexes);
SetMagickPixelPacket(image,p,indexes,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,--q,--shear_indexes);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,--q,--shear_indexes);
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_XShearImage)
#endif
proceed=SetImageProgress(image,XShearImageTag,progress++,height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"
typedef enum
{
UP,
DOWN
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
background;
ssize_t
x;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
/*
Y Shear image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress, status)
#endif
for (x=0; x < (ssize_t) width; x++)
{
ssize_t
step;
MagickPixelPacket
pixel,
source,
destination;
MagickRealType
area,
displacement;
register IndexPacket
*restrict indexes,
*restrict shear_indexes;
register ssize_t
i;
register PixelPacket
*restrict p,
*restrict q;
ShearDirection
direction;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
exception);
if (p == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p+=y_offset;
indexes+=y_offset;
displacement=degrees*(MagickRealType) (x-width/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=DOWN;
else
{
displacement*=(-1.0);
direction=UP;
}
step=(ssize_t) floor((double) displacement);
area=(MagickRealType) (displacement-step);
step++;
pixel=background;
GetMagickPixelPacket(image,&source);
GetMagickPixelPacket(image,&destination);
switch (direction)
{
case UP:
{
/*
Transfer pixels top-to-bottom.
*/
if (step > y_offset)
break;
q=p-step;
shear_indexes=indexes-step;
for (i=0; i < (ssize_t) height; i++)
{
if ((y_offset+i) < step)
{
SetMagickPixelPacket(image,++p,++indexes,&pixel);
q++;
shear_indexes++;
continue;
}
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
SetMagickPixelPacket(image,p++,indexes++,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,q++,shear_indexes++);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,q++,shear_indexes++);
break;
}
case DOWN:
{
/*
Transfer pixels bottom-to-top.
*/
p+=height;
indexes+=height;
q=p+step;
shear_indexes=indexes+step;
for (i=0; i < (ssize_t) height; i++)
{
p--;
indexes--;
q--;
shear_indexes--;
if ((size_t) (y_offset+height+step-i) >= image->rows)
continue;
SetMagickPixelPacket(image,p,indexes,&source);
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&source,(MagickRealType) GetPixelOpacity(p),area,&destination);
SetPixelPacket(image,&destination,q,shear_indexes);
SetMagickPixelPacket(image,p,indexes,&pixel);
}
MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
&background,(MagickRealType) background.opacity,area,&destination);
SetPixelPacket(image,&destination,--q,--shear_indexes);
for (i=0; i < (step-1); i++)
SetPixelPacket(image,&background,--q,--shear_indexes);
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_YShearImage)
#endif
proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% RotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotatation" by Alan W. Paeth. RotateImage is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
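/*
  Paeth's decomposition, which RotateImage() implements below, factors the
  rotation matrix into three shears:

    | cos(t)  -sin(t) |   | 1  -tan(t/2) |   | 1       0 |   | 1  -tan(t/2) |
    | sin(t)   cos(t) | = | 0      1     | * | sin(t)  1 | * | 0      1     |

  which is why the body of the routine applies XShearImage() with
  shear.x = -tan(angle/2), then YShearImage() with shear.y = sin(angle), and
  then XShearImage() again with the same shear.x.
*/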
MagickExport Image *RotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
ssize_t
x_offset,
y_offset;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info;
size_t
height,
rotations,
width,
y_width;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
angle=degrees;
while (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
/*
Calculate shear equations.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
{
InheritException(exception,&integral_image->exception);
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->matte == MagickFalse)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
/*
Compute image size.
*/
width=image->columns;
height=image->rows;
if ((rotations == 1) || (rotations == 3))
{
width=image->rows;
height=image->columns;
}
y_width=width+(ssize_t) floor(fabs(shear.x)*height+0.5);
x_offset=(ssize_t) ceil((double) width+((fabs(shear.y)*height)-width)/2.0-
0.5);
y_offset=(ssize_t) ceil((double) height+((fabs(shear.y)*y_width)-height)/2.0-
0.5);
/*
Surround image with a border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) x_offset;
border_info.height=(size_t) y_offset;
rotate_image=BorderImage(integral_image,&border_info,exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image.
*/
status=XShearImage(rotate_image,shear.x,width,height,x_offset,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,y_width,height,(ssize_t)
(rotate_image->columns-y_width)/2,y_offset,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,y_width,rotate_image->rows,(ssize_t)
(rotate_image->columns-y_width)/2,0,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
return(rotate_image);
}
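/*
  A minimal usage sketch (illustration only; error handling abbreviated).  It
  assumes the usual MagickCore lifecycle helpers for the exception structure:

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *rotated = RotateImage(image, 90.0, exception);
    if (rotated == (Image *) NULL)
      CatchException(exception);
    ...
    rotated = DestroyImage(rotated);
    exception = DestroyExceptionInfo(exception);
*/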
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a sheared copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
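/*
  Usage sketch (illustration only): a 10-degree shear along the X axis and no
  Y shear, given an already-acquired ExceptionInfo:

    Image *sheared = ShearImage(image, 10.0, 0.0, exception);
*/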
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
ssize_t
x_offset,
y_offset;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info;
size_t
y_width;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
{
InheritException(exception,&integral_image->exception);
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->matte == MagickFalse)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
/*
Compute image size.
*/
y_width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
x_offset=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
y_offset=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*y_width)-
image->rows)/2.0-0.5);
/*
Surround image with border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) x_offset;
border_info.height=(size_t) y_offset;
shear_image=BorderImage(integral_image,&border_info,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image.
*/
if (shear_image->matte == MagickFalse)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,x_offset,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,y_width,image->rows,(ssize_t)
(shear_image->columns-y_width)/2,y_offset,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
return(shear_image);
}
|
LR.h
|
#include "omp.h"
#include <sys/timeb.h>
#include <cmath>
#include <cstring>
#include <iostream>
#include "split.h"
using namespace std;
static omp_lock_t lock;
int solve_lr(int nb_row, int nb_col, double ****align_edge, int ***lambda_for_cik, problem *current_problem) {
omp_init_lock(&lock);
double global_ub(INFINITY);
double global_lb(-INFINITY);
double ***lambda_h = new double **[nb_row];
double ***lambda_v = new double **[nb_row];
double ***lambda_a = new double **[nb_row];
double ***g_h = new double **[nb_row];
double ***g_v = new double **[nb_row];
double ***g_a = new double **[nb_row];
for (int i = 0; i < nb_row; i++) {
lambda_h[i] = new double *[nb_col];
for (int j = 0; j < nb_col; j++) {
if (i > 0) {
lambda_h[i][j] = new double[i];
for (int z = 0; z < i; z++)
lambda_h[i][j][z] = 0;
} else
lambda_h[i][j] = NULL;
}
}
for (int i = 0; i < nb_row; i++) {
lambda_v[i] = new double *[nb_col];
for (int j = 0; j < nb_col; j++) {
if (j > 0) {
lambda_v[i][j] = new double[j];
for (int z = 0; z < j; z++)
lambda_v[i][j][z] = 0;
} else
lambda_v[i][j] = NULL;
}
}
for (int i = 0; i < nb_row; i++) {
lambda_a[i] = new double *[nb_col];
for (int j = 0; j < nb_col; j++) {
if (i > 0) {
lambda_a[i][j] = new double[i];
for (int z = 0; z < i; z++)
lambda_a[i][j][z] = 0;
} else
lambda_a[i][j] = NULL;
}
}
for (int i = 0; i < nb_row; i++) {
g_h[i] = new double *[nb_col];
for (int j = 0; j < nb_col; j++) {
if (i > 0) {
g_h[i][j] = new double[i];
for (int z = 0; z < i; z++)
g_h[i][j][z] = 0;
} else
g_h[i][j] = NULL;
}
}
for (int i = 0; i < nb_row; i++) {
g_v[i] = new double *[nb_col];
for (int j = 0; j < nb_col; j++) {
if (j > 0) {
g_v[i][j] = new double[j];
for (int z = 0; z < j; z++)
g_v[i][j][z] = 0;
} else
g_v[i][j] = NULL;
}
}
for (int i = 0; i < nb_row; i++) {
g_a[i] = new double *[nb_col];
for (int j = 0; j < nb_col; j++) {
if (i > 0) {
g_a[i][j] = new double[i];
for (int z = 0; z < i; z++)
g_a[i][j][z] = 0;
} else
g_a[i][j] = NULL;
}
}
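// The six allocations above all build the same ragged [nb_row][nb_col][len]
// structure, zero-initialized, where len is the row index (lambda_h, lambda_a,
// g_h, g_a) or the column index (lambda_v, g_v).  A hypothetical helper (a
// sketch only, not used below) could factor this out:
//
//   static double ***alloc_ragged(int nb_row, int nb_col, bool len_is_row) {
//     double ***a = new double **[nb_row];
//     for (int i = 0; i < nb_row; i++) {
//       a[i] = new double *[nb_col];
//       for (int j = 0; j < nb_col; j++) {
//         int len = len_is_row ? i : j;
//         a[i][j] = (len > 0) ? new double[len]() : NULL;  // () value-initializes to 0
//       }
//     }
//     return a;
//   }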
double alpha(1);
int nb_improve(0);
int nb_noimprove(0);
double last_ub(0);
double last_lb(0);
double dp_mat[100][100] = {0};
int path_mat[100][100] = {0};
double align_node[100][100] = {0};
bool need_to_change[100][100] = {0};
memset(need_to_change, 1, 100 * 100 * sizeof(bool));
for (int iteration = 0; iteration < 500; iteration++) {
double ub(0);
double lb(0);
//---------------------------------------------------------------------double dp
bool **align_node_xvalue = new bool *[nb_row];
for (int row = 0; row < nb_row; row++) {
align_node_xvalue[row] = new bool[nb_col];
for (int col = 0; col < nb_col; col++) {
align_node_xvalue[row][col] = 0;
}
}
//--------------------------------------------local dp
#pragma omp parallel for schedule(dynamic)
for (int row1 = 0; row1 < nb_row; row1++) {
double align_node_temp[100] = {0};
double dp_mat_test[100] = {0};
for (int col1 = 0; col1 < nb_col; col1++) {
if (row1 < current_problem->up[col1] && row1 > current_problem->lo[col1]) {
if (need_to_change[row1][col1]) {
double replace(0);
for (int row2 = 0; row2 < nb_row - row1; row2++) {
if (row2 != 0) {
for (int col2 = 0; col2 < nb_col - col1; col2++) {
if (col2 != 0) {
double temp;
double temp2 = align_edge[row1][col1][row2 - 1][col2 - 1];
if (row1 + row2 < current_problem->up[col1 + col2] &&
row1 + row2 > current_problem->lo[col1 + col2])
temp = replace + temp2
- lambda_h[row1 + row2][col1 + col2][row1]
- lambda_v[row1 + row2][col1 + col2][col1]
+ (temp2 > 0 ? 0 : lambda_a[row1 + row2][col1 + col2][row1]);
else
temp = -INFINITY;
replace = dp_mat_test[col2];
if (dp_mat_test[col2] < dp_mat_test[col2 - 1])
dp_mat_test[col2] = dp_mat_test[col2 - 1];
if (temp > dp_mat_test[col2])
dp_mat_test[col2] = temp;
} else {
replace = dp_mat_test[0];
if (row2 + row1 < current_problem->up[col1] &&
row2 + row1 > current_problem->lo[col1])
dp_mat_test[0] = 0;
else
dp_mat_test[0] = -INFINITY;
}
}
} else {
for (int col2 = 0; col2 < nb_col - col1; col2++) {
if (row1 < current_problem->up[col2 + col1] &&
row1 > current_problem->lo[col1 + col2])
dp_mat_test[col2] = 0;
else
dp_mat_test[col2] = -INFINITY;
}
}
}
align_node_temp[col1] = dp_mat_test[nb_col - col1 - 1] + 0.2;
for (int j = 0; j < row1; j++) {
align_node_temp[col1] += lambda_h[row1][col1][j] - lambda_a[row1][col1][j];
}
for (int l = 0; l < col1; l++) {
align_node_temp[col1] += lambda_v[row1][col1][l];
}
for (int i = 1; i < lambda_for_cik[row1][col1][0] * 2 + 1;) {
int r = lambda_for_cik[row1][col1][i++];
int s = lambda_for_cik[row1][col1][i++];
align_node_temp[col1] -= lambda_a[r][s][row1];
}
} else
align_node_temp[col1] = align_node[row1][col1];
} else
align_node_temp[col1] = -INFINITY;
}
omp_set_lock(&lock);
memcpy(align_node[row1], align_node_temp, nb_col * sizeof(double));
omp_unset_lock(&lock);
}
memset(need_to_change, 0, 100 * 100 * sizeof(bool));
//--------------------------------------------local dp
//--------------------------------------------global dp
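        // The global DP is a standard alignment recursion over the (row, col) grid:
        //   dp_mat[r][c] = max( dp_mat[r][c-1],                       // skip a column
        //                       dp_mat[r-1][c],                       // skip a row
        //                       dp_mat[r-1][c-1] + align_node[r][c] ) // match cell (r,c)
        // path_mat records which case won (-1 = left, -2 = up, 1 = diagonal match)
        // so the traceback below can recover the chosen set of matched cells.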
for (int i = 0; i < nb_row; i++) {
if (i < current_problem->up[0] && i > current_problem->lo[0]) {
dp_mat[i][0] = align_node[i][0];
path_mat[i][0] = -2;
} else {
dp_mat[i][0] = -INFINITY;
path_mat[i][0] = -2;
}
}
for (int i = 0; i < nb_col; i++) {
if (0 < current_problem->up[i] && 0 > current_problem->lo[i]) {
dp_mat[0][i] = align_node[0][i];
path_mat[0][i] = -1;
} else {
dp_mat[0][i] = -INFINITY;
path_mat[0][i] = -1;
}
}
for (int row = 1; row < nb_row; row++) {
for (int col = 1; col < nb_col; col++) {
dp_mat[row][col] = dp_mat[row][col - 1];
path_mat[row][col] = -1;
double temp = dp_mat[row - 1][col];
if (temp > dp_mat[row][col]) {
dp_mat[row][col] = temp;
path_mat[row][col] = -2;
}
temp = dp_mat[row - 1][col - 1] + align_node[row][col];
if (temp > dp_mat[row][col]) {
dp_mat[row][col] = temp;
path_mat[row][col] = 1;
}
}
}
ub = dp_mat[nb_row - 1][nb_col - 1]; //upper bound
//---------------------------------------------flash back
int x_1 = nb_row;
int y_1 = nb_col;
while (x_1 != 1 || y_1 != 1) {
if (path_mat[x_1 - 1][y_1 - 1] == -1)
y_1--;
else if (path_mat[x_1 - 1][y_1 - 1] == -2)
x_1--;
else if (path_mat[x_1 - 1][y_1 - 1] == 1) {
align_node_xvalue[x_1 - 1][y_1 - 1] = true;
x_1--;
y_1--;
if (x_1 - 1 == 0 || y_1 - 1 == 0)
align_node_xvalue[x_1 - 1][y_1 - 1] = 1;
}
}
//---------------------------------------------flash back
//--------------------------------------------global dp
//--------------------------------------------compute lower bound
#pragma omp parallel for schedule(dynamic) reduction(+: lb)
for (int row1 = 0; row1 < nb_row; row1++) {
for (int col1 = 0; col1 < nb_col; col1++) {
if (align_node_xvalue[row1][col1]) {
lb += 0.2;
for (int row2 = 0; row2 < nb_row - row1 - 1; row2++) {
for (int col2 = 0; col2 < nb_col - col1 - 1; col2++) {
if (align_node_xvalue[row1 + row2 + 1][col1 + col2 + 1])
lb += align_edge[row1][col1][row2][col2];
}
}
}
}
}
//--------------------------------------------compute lower bound
if (ub < global_ub)
global_ub = ub;
if (lb > global_lb)
global_lb = lb;
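        // Adaptive step scaling: after at least 5 consecutive iterations that improve
        // either bound, each further improvement enlarges alpha (divide by 0.9);
        // after at least 5 consecutive iterations with no improvement, each further
        // stall shrinks alpha (multiply by 0.9).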
if (iteration > 0) {
if (ub < last_ub || lb > last_lb) {
nb_improve++;
nb_noimprove = 0;
if (nb_improve >= 5)
alpha /= 0.9;
} else {
nb_improve = 0;
nb_noimprove++;
if (nb_noimprove >= 5)
alpha *= 0.9;
}
}
last_ub = ub;
last_lb = lb;
if (global_ub <= global_lb) {
cout << "iteration: " << iteration << endl;
cout << "final_solution: " << global_lb << endl;
return 0;
}
//---------------------------------------------------------------------double dp
//---------------------------------------------------------------------lambda update
double thelta(0);
//---------------------------------------------------------------------update g
#pragma omp parallel for schedule(dynamic)
for (int row1 = 0; row1 < nb_row; row1++) {
for (int col1 = 0; col1 < nb_col; col1++) {
if (row1 < current_problem->up[col1] && row1 > current_problem->lo[col1]) {
for (int j_1 = 0; j_1 < row1; j_1++) {
g_a[row1][col1][j_1] += 1.0;
}
}
if (align_node_xvalue[row1][col1]) {
for (int j = 0; j < row1; j++) {
g_h[row1][col1][j] += 1.0;
g_a[row1][col1][j] -= 1.0;
}
for (int l = 0; l < col1; l++) {
g_v[row1][col1][l] += 1.0;
}
}
}
}
#pragma omp parallel for schedule(dynamic)
for (int row1 = 0; row1 < nb_row; row1++) {
for (int col1 = 0; col1 < nb_col; col1++) {
if (align_node_xvalue[row1][col1]) {
for (int i = 1; i < lambda_for_cik[row1][col1][0] * 2 + 1;) {
int r = lambda_for_cik[row1][col1][i++];
int s = lambda_for_cik[row1][col1][i++];
g_a[r][s][row1] -= 1.0;
}
}
}
}
/*#pragma omp parallel for schedule(dynamic) private(dp_mat, path_mat)
for (int row1 = 0; row1 < nb_row; row1++)
{
for (int col1 = 0; col1 < nb_col; col1++)
{
if (align_node_xvalue[row1][col1])
{
for (int x = -2; x < 3; x++)
{
if (row1 + x >= 0 && row1 + x < nb_row)
need_to_change[row1 + x][col1] = true;
}
for (int i = 0; i < nb_row - row1; i++)
{
if (i + row1 < current_problem->up[col1] && i + row1 > current_problem->lo[col1])
{
dp_mat[i][0] = 0; path_mat[i][0] = -2;
}
else
{
dp_mat[i][0] = -INFINITY; path_mat[i][0] = -2;
}
}
for (int i = 0; i < nb_col - col1; i++)
{
if (row1 < current_problem->up[i + col1] && row1 > current_problem->lo[i + col1])
{
dp_mat[0][i] = 0; path_mat[0][i] = -1;
}
else
{
dp_mat[0][i] = -INFINITY; path_mat[0][i] = -1;
}
}
for (int row2 = 1; row2 < nb_row - row1; row2++)
{
for (int col2 = 1; col2 < nb_col - col1; col2++)
{
dp_mat[row2][col2] = dp_mat[row2][col2 - 1];
path_mat[row2][col2] = -1;
double temp = dp_mat[row2 - 1][col2];
if (temp > dp_mat[row2][col2])
{
dp_mat[row2][col2] = temp;
path_mat[row2][col2] = -2;
}
double temp2 = align_edge[row1][col1][row2 - 1][col2 - 1];
if (row1 + row2 < current_problem->up[col1 + col2] && row1 + row2 > current_problem->lo[col1 + col2])
temp = dp_mat[row2 - 1][col2 - 1] + temp2
- lambda_h[row1 + row2][col1 + col2][row1]
- lambda_v[row1 + row2][col1 + col2][col1]
+ (temp2 <= 0 ? lambda_a[row1 + row2][col1 + col2][row1] : 0);
else
temp = -INFINITY;
if (temp > dp_mat[row2][col2])
{
dp_mat[row2][col2] = temp;
path_mat[row2][col2] = 1;
}
}
}
int x = nb_row - row1;
int y = nb_col - col1;
while (x != 1 || y != 1)
{
if (path_mat[x - 1][y - 1] == -1)
y--;
else if (path_mat[x - 1][y - 1] == -2)
x--;
else if (path_mat[x - 1][y - 1] == 1)
{
g_h[row1 + x - 1][col1 + y - 1][row1] -= 1.0;
g_v[row1 + x - 1][col1 + y - 1][col1] -= 1.0;
if (align_edge[row1][col1][x - 2][y - 2] <= 0)
g_a[row1 + x - 1][col1 + y - 1][row1] += 1.0;
x--; y--;
}
}
}
}
}*/
#pragma omp parallel for schedule(dynamic) private(path_mat)
for (int row1 = 0; row1 < nb_row; row1++) {
double dp_mat_test[100] = {0};
for (int col1 = 0; col1 < nb_col; col1++) {
if (align_node_xvalue[row1][col1]) {
for (int x = -2; x < 3; x++) {
if (row1 + x >= 0 && row1 + x < nb_row)
need_to_change[row1 + x][col1] = true;
}
//---------------------------------------------------------------------y
double replace(0);
for (int row2 = 0; row2 < nb_row - row1; row2++) {
if (row2 != 0) {
for (int col2 = 0; col2 < nb_col - col1; col2++) {
if (col2 != 0) {
double temp;
double temp2 = align_edge[row1][col1][row2 - 1][col2 - 1];
if (row1 + row2 < current_problem->up[col1 + col2] &&
row1 + row2 > current_problem->lo[col1 + col2])
temp = replace + temp2
- lambda_h[row1 + row2][col1 + col2][row1]
- lambda_v[row1 + row2][col1 + col2][col1]
+ (temp2 > 0 ? 0 : lambda_a[row1 + row2][col1 + col2][row1]);
else
temp = -INFINITY;
replace = dp_mat_test[col2];
if (dp_mat_test[col2] <= dp_mat_test[col2 - 1]) {
dp_mat_test[col2] = dp_mat_test[col2 - 1];
path_mat[row2][col2] = -1;
} else {
path_mat[row2][col2] = -2;
}
if (temp > dp_mat_test[col2]) {
dp_mat_test[col2] = temp;
path_mat[row2][col2] = 1;
}
} else {
replace = dp_mat_test[0];
if (row2 + row1 < current_problem->up[col1] &&
row2 + row1 > current_problem->lo[col1]) {
dp_mat_test[0] = 0;
path_mat[row2][0] = -2;
} else {
dp_mat_test[0] = -INFINITY;
path_mat[row2][0] = -2;
}
}
}
} else {
for (int col2 = 0; col2 < nb_col - col1; col2++) {
if (row1 < current_problem->up[col2 + col1] &&
row1 > current_problem->lo[col1 + col2]) {
dp_mat_test[col2] = 0;
path_mat[0][col2] = -1;
} else {
dp_mat_test[col2] = -INFINITY;
path_mat[0][col2] = -1;
}
}
}
}
int x = nb_row - row1;
int y = nb_col - col1;
while (x != 1 || y != 1) {
if (path_mat[x - 1][y - 1] == -1)
y--;
else if (path_mat[x - 1][y - 1] == -2)
x--;
else if (path_mat[x - 1][y - 1] == 1) {
g_h[row1 + x - 1][col1 + y - 1][row1] -= 1.0;
g_v[row1 + x - 1][col1 + y - 1][col1] -= 1.0;
if (align_edge[row1][col1][x - 2][y - 2] <= 0)
g_a[row1 + x - 1][col1 + y - 1][row1] += 1.0;
x--;
y--;
}
}
}
}
}
//---------------------------------------------------------------------update g
//---------------------------------------------------------------------compute thelta
double temp(0);
#pragma omp parallel for schedule(dynamic) reduction(+: temp)
for (int row1 = 0; row1 < nb_row; row1++) {
for (int col1 = 0; col1 < nb_col; col1++) {
if (row1 < current_problem->up[col1] && row1 > current_problem->lo[col1]) {
for (int j_1 = 0; j_1 < row1; j_1++) {
if (g_h[row1][col1][j_1] != 0)
temp++;
if (g_a[row1][col1][j_1] != 0)
temp++;
}
for (int l_1 = 0; l_1 < col1; l_1++) {
if (g_v[row1][col1][l_1] != 0)
temp++;
}
}
}
}
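        // Polyak-style subgradient step: alpha times the current duality gap
        // (ub - global_lb), divided by `temp`, the number of nonzero subgradient
        // components, which stands in for the usual squared norm ||g||^2.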
thelta = alpha * (ub - global_lb) / temp;
//---------------------------------------------------------------------compute thelta
//---------------------------------------------------------------------update lambda
#pragma omp parallel for schedule(dynamic)
for (int row1 = 0; row1 < nb_row; row1++) {
for (int col1 = 0; col1 < nb_col; col1++) {
if (row1 < current_problem->up[col1] && row1 > current_problem->lo[col1]) {
for (int j = 0; j < row1; j++) {
lambda_h[row1][col1][j] -= thelta * g_h[row1][col1][j];
g_h[row1][col1][j] = 0;
if (lambda_h[row1][col1][j] < 0)
lambda_h[row1][col1][j] = 0;
lambda_a[row1][col1][j] -= thelta * g_a[row1][col1][j];
g_a[row1][col1][j] = 0;
if (lambda_a[row1][col1][j] < 0)
lambda_a[row1][col1][j] = 0;
}
for (int l = 0; l < col1; l++) {
lambda_v[row1][col1][l] -= thelta * g_v[row1][col1][l];
g_v[row1][col1][l] = 0;
if (lambda_v[row1][col1][l] < 0)
lambda_v[row1][col1][l] = 0;
}
}
}
}
//---------------------------------------------------------------------update lambda
//---------------------------------------------------------------------lambda update
for (int row1 = 0; row1 < nb_row; row1++) {
delete[] align_node_xvalue[row1];
}
delete[] align_node_xvalue;
}
//---------------------------------------------------------------------LR
for (int i = 0; i < nb_row; i++) {
for (int j = 0; j < nb_col; j++) {
delete[] lambda_h[i][j];
delete[] lambda_v[i][j];
delete[] lambda_a[i][j];
delete[] g_h[i][j];
delete[] g_v[i][j];
delete[] g_a[i][j];
}
delete[] lambda_h[i];
delete[] lambda_v[i];
delete[] lambda_a[i];
delete[] g_h[i];
delete[] g_a[i];
delete[] g_v[i];
}
delete[] g_h;
delete[] g_a;
delete[] g_v;
delete[] lambda_h;
delete[] lambda_v;
delete[] lambda_a;
current_problem->ub = global_ub;
current_problem->lb = global_lb;
return 1;
}
|
serial_tree_learner.h
|
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include "feature_histogram.hpp"
#include "split_info.hpp"
#include "data_partition.hpp"
#include "leaf_splits.hpp"
#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>
#ifdef USE_GPU
// Use a 4KB-aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
namespace LightGBM {
/*!
* \brief Used for learning a tree on a single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
explicit SerialTreeLearner(const TreeConfig* tree_config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const TreeConfig* tree_config) override;
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices(used_indices, num_data);
}
void AddPredictionToScore(const Tree* tree, double* out_score) const override {
if (tree->num_leaves() <= 1) { return; }
CHECK(tree->num_leaves() <= data_partition_->num_leaves());
#pragma omp parallel for schedule(static)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
protected:
/*!
* \brief Some initial work done before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial work done before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Find best thresholds for all features, using multi-threading.
* The result will be stored in smaller_leaf_splits_ and larger_leaf_splits_.
* This function will be called in FindBestSplit.
*/
virtual void FindBestThresholds();
/*!
* \brief Find best features for leaves from smaller_leaf_splits_ and larger_leaf_splits_.
* This function will be called after FindBestThresholds.
*/
virtual void FindBestSplitsForLeaves();
/*!
* \brief Partition the tree and data according to the best split.
* \param tree Current tree; it will be split by this function.
* \param best_leaf The index of the leaf that will be split.
* \param left_leaf The index of the left leaf after the split.
* \param right_leaf The index of the right leaf after the split.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used to generate the subset of features used for training */
Random random_;
/*! \brief used for sub-feature training; is_feature_used_[i] = false means feature i is not used */
std::vector<int8_t> is_feature_used_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief stores the best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief stores the best thresholds for all features of the smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores the best thresholds for all features of the larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
/*! \brief gradients of the current iteration, ordered for cache optimization, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of the current iteration, ordered for cache optimization, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of the current iteration, ordered for cache optimization */
std::vector<score_t> ordered_gradients_;
/*! \brief hessians of the current iteration, ordered for cache optimization */
std::vector<score_t> ordered_hessians_;
#endif
/*! \brief Store ordered bin */
std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
/*! \brief True if has ordered bin */
bool has_ordered_bin_ = false;
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char> is_data_in_leaf_;
/*! \brief used to cache historical histograms to speed up training */
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const TreeConfig* tree_config_;
int num_threads_;
std::vector<int> ordered_bin_indices_;
bool is_constant_hessian_;
};
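/*
 * Typical call sequence (illustration only; assumes a prepared Dataset and
 * per-datum gradient/hessian buffers):
 *
 *   SerialTreeLearner learner(&tree_config);
 *   learner.Init(train_data, is_constant_hessian);
 *   Tree* tree = learner.Train(gradients, hessians, is_constant_hessian);
 *   learner.AddPredictionToScore(tree, out_score);
 */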
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leafIdx) const {
if (leafIdx >= 0) {
return data_partition_->leaf_count(leafIdx);
} else {
return 0;
}
}
} // namespace LightGBM
#endif // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
THTensorMath.c
|
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#define TH_OMP_OVERHEAD_THRESHOLD 100000
void THTensor_(fill)(THTensor *r_, real value)
{
TH_TENSOR_APPLY(real, r_,
THVector_(fill)(r__data, value, r__size); break;);
}
void THTensor_(zero)(THTensor *r_)
{
TH_TENSOR_APPLY(real, r_,
THVector_(fill)(r__data, 0, r__size); break;);
}
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1)
{
THFree(mask_counter);
THFree(tensor_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = value;
});
}
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
THTensor *srct = THTensor_(newContiguous)(src);
real *src_data = THTensor_(data)(srct);
long cntr = 0;
long nelem = THTensor_(nElement)(srct);
if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
{
THTensor_(free)(srct);
THError("Number of elements of destination tensor != Number of elements in mask");
}
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1)
{
THTensor_(free)(srct);
THFree(mask_counter);
THFree(tensor_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
if (cntr == nelem)
{
THTensor_(free)(srct);
THFree(mask_counter);
THFree(tensor_counter);
THError("Number of elements of src < number of ones in mask");
}
*tensor_data = *src_data;
src_data++;
cntr++;
});
THTensor_(free)(srct);
}
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
long numel = THByteTensor_sumall(mask);
real *tensor_data;
THTensor_(resize1d)(tensor,numel);
tensor_data = THTensor_(data)(tensor);
TH_TENSOR_APPLY2(real, src, unsigned char, mask,
if (*mask_data > 1)
{
THFree(mask_counter);
THFree(src_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = *src_data;
tensor_data++;
});
}
// Finds non-zero elements of a tensor and returns their subscripts
void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
{
long numel = 0;
long *subscript_data;
long i = 0;
long dim;
long div = 1;
/* First Pass to determine size of subscripts */
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data != 0) {
++numel;
});
THLongTensor_resize2d(subscript, numel, tensor->nDimension);
/* Second pass populates subscripts */
subscript_data = THLongTensor_data(subscript);
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data != 0) {
div = 1;
for (dim = tensor->nDimension - 1; dim >= 0; dim--) {
*(subscript_data + dim) = (i/div) % tensor->size[dim];
div *= tensor->size[dim];
}
subscript_data += tensor->nDimension;
}
++i;);
}
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
long i, numel;
THLongStorage *newSize;
THTensor *tSlice, *sSlice;
long *index_data;
real *tensor_data, *src_data;
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(src->nDimension > 0,2,"Source tensor is empty");
numel = THLongTensor_nElement(index);
newSize = THLongStorage_newWithSize(src->nDimension);
THLongStorage_rawCopy(newSize,src->size);
newSize->data[dim] = numel;
THTensor_(resize)(tensor,newSize,NULL);
THLongStorage_free(newSize);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor))
{
tensor_data = THTensor_(data)(tensor);
src_data = THTensor_(data)(src);
long rowsize = THTensor_(nElement)(src) / src->size[0];
// check that the indices are within range
long max = src->size[0] - 1 + TH_INDEX_BASE;
for (i=0; i<numel; i++) {
if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) {
THLongTensor_free(index);
THError("index out of range");
}
}
if (src->nDimension == 1) {
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE];
} else {
#pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real));
}
}
else if (src->nDimension == 1)
{
for (i=0; i<numel; i++)
THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE));
}
else
{
for (i=0; i<numel; i++)
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor, dim, i);
THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(copy)(tSlice, sSlice);
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
}
THLongTensor_free(index);
}
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long i, numel;
THTensor *tSlice, *sSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1 )
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(copy)(tSlice, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i));
}
}
THLongTensor_free(index);
}
void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long i, numel;
THTensor *tSlice, *sSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1)
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor,
index_data[i] - TH_INDEX_BASE,
THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE));
}
}
THLongTensor_free(index);
}
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
long i, numel;
THTensor *tSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
for (i=0; i<numel; i++)
{
if (tensor->nDimension > 1)
{
tSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE);
THTensor_(fill)(tSlice, val);
THTensor_(free)(tSlice);
}
else
{
THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val);
}
}
THLongTensor_free(index);
}
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
long elems_per_row, i, idx;
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
"Input tensor must have same dimensions as output tensor");
THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
"Index tensor must have same dimensions as input tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in gather");
}
*(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride];
})
}
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatter");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride);
})
}
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
long elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatter");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val;
})
}
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
accreal sum = 0;
/* Trick: hand the whole contiguous run to THBlas_(dot) at once, then advance
   the TH_TENSOR_APPLY2 indices and data pointers past that run ourselves and
   break out of the element loop so the macro moves on to the next run. */
TH_TENSOR_APPLY2(real, tensor, real, src,
long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
tensor_i += sz;
src_i += sz;
tensor_data += sz*tensor_stride;
src_data += sz*src_stride;
break;);
return sum;
}
#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
if (isnan(val)) break;
#else
#define th_isnan(val)
#endif
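/* Helper for the min/max reductions below: for float/double it expands to an
   early `break` as soon as a NaN has just been recorded as the running
   extremum, cutting the current scan short; for integral types it expands to
   nothing. */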
real THTensor_(minall)(THTensor *tensor)
{
real theMin;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMin = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theMin = value;
th_isnan(value)
});
return theMin;
}
real THTensor_(maxall)(THTensor *tensor)
{
real theMax;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMax = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theMax = value;
th_isnan(value)
});
return theMax;
}
accreal THTensor_(sumall)(THTensor *tensor)
{
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
return sum;
}
accreal THTensor_(prodall)(THTensor *tensor)
{
accreal prod = 1;
TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
return prod;
}
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] + value;
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
}
}
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
THTensor_(add)(r_, t, -value);
}
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] * value;
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
}
}
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] / value;
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
}
}
void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = fmod(tp[i], value);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value););
}
}
void THTensor_(remainder)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
}
}
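/* Example of how remainder differs from fmod: the result above follows the
   sign of the divisor (Lua/Python-style modulo), while fmod follows the sign
   of the dividend.  For floating-point types, with t = -5 and value = 3:

     fmod(-5, 3)              = -2
     -5 - 3 * floor(-5 / 3.0) = -5 - 3 * (-2) = 1
*/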
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
real t_val;
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
}
}
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
if(r_ == t) {
THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
} else {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i< sz; i++)
rp[i] = tp[i] + value * sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
}
}
void THTensor_(csub)(THTensor *r_, THTensor *t, real value,THTensor *src)
{
THTensor_(cadd)(r_, t, -value, src);
}
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] * sp[i];
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
}
}
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = pow(tp[i], sp[i]);
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data););
}
}
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] / sp[i];
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
}
}
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = fmod(tp[i], sp[i]);
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
}
}
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
}
}
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = pow(value, tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data););
}
}
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected, got %dD, %dD",
mat->nDimension, vec->nDimension);
if( mat->size[1] != vec->size[0] ) {
THDescBuff bm = THTensor_(sizeDesc)(mat);
THDescBuff bv = THTensor_(sizeDesc)(vec);
THError("size mismatch, %s, %s", bm.str, bv.str);
}
if(t->nDimension != 1)
THError("vector expected, got t: %dD", t->nDimension);
if(t->size[0] != mat->size[0]) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm = THTensor_(sizeDesc)(mat);
THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
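  /* THBlas_(gemv) expects a column-major matrix.  If mat->stride[0] == 1 the
     data is already column-major (leading dimension mat->stride[1]) and 'n' is
     used; if mat->stride[1] == 1 the data is row-major, i.e. the transpose of
     a column-major matrix, so 't' is used with the dimensions swapped;
     otherwise the matrix is copied to a contiguous buffer first. */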
if(mat->stride[0] == 1)
{
THBlas_(gemv)('n', mat->size[0], mat->size[1],
alpha, THTensor_(data)(mat), mat->stride[1],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else if(mat->stride[1] == 1)
{
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(mat), mat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cmat = THTensor_(newContiguous)(mat);
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(cmat), cmat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
THTensor_(free)(cmat);
}
}
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
long N1 = m1->size[0];
long N2 = m2->size[0];
long dim;
real *m1_p;
real *m2_p;
real *r_p;
long i;
THTensor_(resize2d)(r_, N1, N2);
m1 = THTensor_(newContiguous)(m1);
m2 = THTensor_(newContiguous)(m2);
THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);
dim = m1->size[1];
THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");
m1_p = THTensor_(data)(m1);
m2_p = THTensor_(data)(m2);
r_p = THTensor_(data)(r_);
#pragma omp parallel for private(i)
for (i=0; i<N1; i++) {
long j,k;
for (j=0; j<N2; j++) {
real sum = 0;
for (k=0; k<dim; k++) {
real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
sum += term*term;
}
r_p[ i*N2 + j ] = gain * sum;
}
}
THTensor_(free)(m1);
THTensor_(free)(m2);
}
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2))
THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);
if(m1->size[1] != m2->size[0]) {
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
}
if( t->nDimension != 2 )
THError("matrix expected, got %dD tensor for t", t->nDimension);
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
}
if(t != r_)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
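  /* As in addmv, THBlas_(gemm) works on column-major operands, so the code
     below inspects the strides of r_, m1 and m2 and chooses transpose flags
     that let BLAS operate directly on the existing memory.  When r_ is
     row-major the 't' path is taken and m1/m2 are swapped, using
     (m1*m2)^T = m2^T * m1^T; if r_ is neither row- nor column-contiguous, a
     transposed clone is used and copied back at the end. */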
/* r_ */
if(r_->stride[0] == 1 &&
r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
r_->stride[0] != 0)
{
THTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
r__ = THTensor_(newClone)(transp_r_);
THTensor_(free)(transp_r_);
THTensor_(transpose)(r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THTensor_(newContiguous)(m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THTensor_(newContiguous)(m2);
}
/* do the operation */
THBlas_(gemm)(transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THTensor_(data)(m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THTensor_(data)(m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THTensor_(data)(r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THTensor_(free)(m1_);
if(m2_ != m2)
THTensor_(free)(m2_);
if(r__ != r_)
THTensor_(freeCopyTo)(r__, r_);
}
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected, got %dD, %dD tensors",
vec1->nDimension, vec2->nDimension);
if(t->nDimension != 2)
THError("expected matrix, got %dD tensor for t", t->nDimension);
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(beta != 1)
THTensor_(mul)(r_, r_, beta);
if(r_->stride[0] == 1)
{
THBlas_(ger)(vec1->size[0], vec2->size[0],
alpha, THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cr = THTensor_(newClone)(r_);
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(cr), cr->stride[0]);
THTensor_(freeCopyTo)(cr, r_);
}
}
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
long batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));
long dim1 = THTensor_(size)(batch1, 1);
long dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
THTensor_(copy)(result, t);
}
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
beta = 1; // after the first batch, accumulate into the existing output (apply beta only once)
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
}
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
long batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));
long bs = THTensor_(size)(batch1, 0);
long dim1 = THTensor_(size)(batch1, 1);
long dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
THTensor_(copy)(result, t);
}
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
THTensor *result_matrix = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(select)(result_matrix, result, 0, batch);
THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2);
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
THTensor_(free)(result_matrix);
}
long THTensor_(numel)(THTensor *t)
{
return THTensor_(nElement)(t);
}
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
real theMax;
real value;
long theIndex;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
theMax = t_data[0];
theIndex = 0;
for(i = 0; i < t_size; i++)
{
value = t_data[i*t_stride];
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theIndex = i;
theMax = value;
if (th_isnan(value)) break; /* stop so that NaN propagates as the result */
}
}
*indices__data = theIndex;
*values__data = theMax;);
}
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
real theMin;
real value;
long theIndex;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
theMin = t_data[0];
theIndex = 0;
for(i = 0; i < t_size; i++)
{
value = t_data[i*t_stride];
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theIndex = i;
theMin = value;
if (th_isnan(value)) break; /* stop so that NaN propagates as the result */
}
}
*indices__data = theIndex;
*values__data = theMin;);
}
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum;);
}
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal prod = 1;
long i;
for(i = 0; i < t_size; i++)
prod *= t_data[i*t_stride];
*r__data = (real)prod;);
}
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumsum = 0;
long i;
for(i = 0; i < t_size; i++)
{
cumsum += t_data[i*t_stride];
r__data[i*r__stride] = (real)cumsum;
});
}
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumprod = 1;
long i;
for(i = 0; i < t_size; i++)
{
cumprod *= t_data[i*t_stride];
r__data[i*r__stride] = (real)cumprod;
});
}
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
#if defined (TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else *r__data = 0;);
#else
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else if (*t_data < 0) *r__data = -1;
else *r__data = 0;);
#endif
}
accreal THTensor_(trace)(THTensor *t)
{
real *t_data = THTensor_(data)(t);
accreal sum = 0;
long i = 0;
long t_stride_0, t_stride_1, t_diag_size;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
while(i < t_diag_size)
{
sum += t_data[i*(t_stride_0+t_stride_1)];
i++;
}
return sum;
}
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
int i;
if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
THError("inconsistent tensor dimension %dD, %dD",
THTensor_(nDimension)(a), THTensor_(nDimension)(b));
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
THDescBuff ba = THTensor_(sizeDesc)(a);
THDescBuff bb = THTensor_(sizeDesc)(b);
THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
}
}
if(dimension < 0)
{
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) == 3)
{
dimension = i;
break;
}
}
if(dimension < 0) {
THDescBuff ba = THTensor_(sizeDesc)(a);
THError("no dimension of size 3 in a: %s", ba.str);
}
}
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
dimension + TH_INDEX_BASE);
THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
dimension + TH_INDEX_BASE);
THTensor_(resizeAs)(r_, a);
TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY3(real, r, real, t, real, src,
*r_data = *t_data > *src_data ? *t_data : *src_data;);
}
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY3(real, r, real, t, real, src,
*r_data = *t_data < *src_data ? *t_data : *src_data;);
}
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY2(real, r, real, t,
*r_data = *t_data > value ? *t_data : value;);
}
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY2(real, r, real, t,
*r_data = *t_data < value ? *t_data : value;);
}
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(zero)(r_);
}
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(fill)(r_, 1);
}
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");
if(THTensor_(nDimension)(t) == 1)
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_size = THTensor_(size)(t, 0);
long sz = t_size + (k >= 0 ? k : -k);
real *r__data;
long r__stride_0;
long r__stride_1;
long i;
THTensor_(resize2d)(r_, sz, sz);
THTensor_(zero)(r_);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);
for(i = 0; i < t_size; i++)
r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
}
else
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_stride_1 = THTensor_(stride)(t, 1);
long sz;
real *r__data;
long r__stride_0;
long i;
if(k >= 0)
sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
else
sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
THTensor_(resize1d)(r_, sz);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
for(i = 0; i < sz; i++)
r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
}
}
void THTensor_(eye)(THTensor *r_, long n, long m)
{
real *r__data;
long i, sz;
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THTensor_(resize2d)(r_, n, m);
THTensor_(zero)(r_);
i = 0;
r__data = THTensor_(data)(r_);
sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
for(i = 0; i < sz; i++)
r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
long size;
real i = 0;
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper and lower bounds inconsistent with step sign");
size = (long) (((xmax - xmin) / step) + 1);
if (THTensor_(nElement)(r_) != size) {
THTensor_(resize1d)(r_, size);
}
TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n)
{
real *r__data;
long r__stride_0;
long i;
THArgCheck(n > 0, 1, "must be strictly positive");
THTensor_(resize1d)(r_, n);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_,0);
for(i = 0; i < n; i++)
r__data[i*r__stride_0] = (real)(i);
for(i = 0; i < n-1; i++)
{
long z = THRandom_random(_generator) % (n-i);
real sav = r__data[i*r__stride_0];
r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
r__data[(z+i)*r__stride_0] = sav;
}
}
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
Sedgewick's 1978 "Implementing Quicksort Programs" article
http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf
It is a state-of-the-art implementation. The macros
are here to make as close a match as possible to the pseudocode of
Program 2 p.851
Note that other partition schemes exist, and are typically presented
in textbook, but those are less efficient. See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
Julien, November 12th 2013
*/
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap
#define BOTH_SWAP(III, JJJ) \
REAL_SWAP(ARR(III), ARR(JJJ)); \
LONG_SWAP(IDX(III), IDX(JJJ))
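/*
Illustrative sketch (documentation only, never compiled): the two routines
below co-sort a strided value array with a parallel index array, which is how
sort, mode, kthvalue and topk recover the original position of each element.
The arrays here are hypothetical; with distinct values and stride 1:
*/
#if 0
real values[5] = { 30, 10, 50, 20, 40 };
long indices[5] = { 0, 1, 2, 3, 4 };
THTensor_(quicksortascend)(values, indices, 5, 1);
/* values == { 10, 20, 30, 40, 50 } */
/* indices == { 1, 3, 0, 4, 2 } -- original positions of the sorted values */
#endif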
static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
real rswap, piv;
unsigned char done = 0;
/* beg[0]=0; end[0]=elements; */
stack = 0;
L = 0; R = elements-1;
done = elements-1 <= M_SMALL;
while(!done) {
/* Use median of three for pivot choice */
P=(L+R)>>1;
BOTH_SWAP(P, L+1);
if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }
i = L+1; j = R; piv = ARR(L); pid = IDX(L);
do {
do { i = i+1; } while(ARR(i) < piv);
do { j = j-1; } while(ARR(j) > piv);
if (j < i)
break;
BOTH_SWAP(i, j);
} while(1);
BOTH_SWAP(L, j);
/* Left subfile is (L, j-1) */
/* Right subfile is (i, R) */
sz_left = j-L;
sz_right = R-i+1;
if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
/* both subfiles are small */
/* if stack empty */
if (stack == 0) {
done = 1;
} else {
stack--;
L = beg[stack];
R = end[stack];
}
} else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
/* exactly one of the subfiles is small */
/* (L,R) = large subfile */
if (sz_left > sz_right) {
/* Implicit: L = L; */
R = j-1;
} else {
L = i;
/* Implicit: R = R; */
}
} else {
/* none of the subfiles is small */
/* push large subfile */
/* (L,R) = small subfile */
if (sz_left > sz_right) {
beg[stack] = L;
end[stack] = j-1;
stack++;
L = i;
/* Implicit: R = R */
} else {
beg[stack] = i;
end[stack] = R;
stack++;
/* Implicit: L = L; */
R = j-1;
}
}
} /* while not done */
/* Now insertion sort on the concatenation of subfiles */
for(i=elements-2; i>=0; i--) {
if (ARR(i) > ARR(i+1)) {
piv = ARR(i);
pid = IDX(i);
j = i+1;
do {
ARR(j-1) = ARR(j);
IDX(j-1) = IDX(j);
j = j+1;
} while(j < elements && ARR(j) < piv);
ARR(j-1) = piv;
IDX(j-1) = pid;
}
}
}
static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
real rswap, piv;
unsigned char done = 0;
/* beg[0]=0; end[0]=elements; */
stack = 0;
L = 0; R = elements-1;
done = elements-1 <= M_SMALL;
while(!done) {
/* Use median of three for pivot choice */
P=(L+R)>>1;
BOTH_SWAP(P, L+1);
if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }
i = L+1; j = R; piv = ARR(L); pid = IDX(L);
do {
do { i = i+1; } while(ARR(i) > piv);
do { j = j-1; } while(ARR(j) < piv);
if (j < i)
break;
BOTH_SWAP(i, j);
} while(1);
BOTH_SWAP(L, j);
/* Left subfile is (L, j-1) */
/* Right subfile is (i, R) */
sz_left = j-L;
sz_right = R-i+1;
if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
/* both subfiles are small */
/* if stack empty */
if (stack == 0) {
done = 1;
} else {
stack--;
L = beg[stack];
R = end[stack];
}
} else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
/* exactly one of the subfiles is small */
/* (L,R) = large subfile */
if (sz_left > sz_right) {
/* Implicit: L = L; */
R = j-1;
} else {
L = i;
/* Implicit: R = R; */
}
} else {
/* none of the subfiles is small */
/* push large subfile */
/* (L,R) = small subfile */
if (sz_left > sz_right) {
beg[stack] = L;
end[stack] = j-1;
stack++;
L = i;
/* Implicit: R = R */
} else {
beg[stack] = i;
end[stack] = R;
stack++;
/* Implicit: L = L; */
R = j-1;
}
}
} /* while not done */
/* Now insertion sort on the concatenation of subfiles */
for(i=elements-2; i>=0; i--) {
if (ARR(i) < ARR(i+1)) {
piv = ARR(i);
pid = IDX(i);
j = i+1;
do {
ARR(j-1) = ARR(j);
IDX(j-1) = IDX(j);
j = j+1;
} while(j < elements && ARR(j) > piv);
ARR(j-1) = piv;
IDX(j-1) = pid;
}
}
}
#undef MAX_LEVELS
#undef M_SMALL
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(resizeAs)(rt_, t);
THTensor_(copy)(rt_, t);
{
THLongStorage *size = THTensor_(newSizeOf)(t);
THLongTensor_resize(ri_, size, NULL);
THLongStorage_free(size);
}
if(descendingOrder)
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
}
else
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
}
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm. */
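/*
Illustrative sketch (documentation only, never compiled): quickselect below
partially sorts the values (and the parallel index array) so that position k
holds the element of 0-based rank k, i.e. the (k+1)-th smallest, with
everything to its left <= it and everything to its right >= it. With the
hypothetical arrays below (distinct values, stride 1):
*/
#if 0
real values[5] = { 50, 10, 40, 20, 30 };
long indices[5] = { 0, 1, 2, 3, 4 };
THTensor_(quickselect)(values, indices, 2, 5, 1);
/* values[2] == 30 and indices[2] == 4: the 3rd smallest value and its
original position; kthvalue, median and topk rely on exactly this property. */
#endif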
static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride)
{
long P, L, R, i, j, swap, pid;
real rswap, piv;
L = 0;
R = elements-1;
do {
if (R <= L) /* One element only */
return;
if (R == L+1) { /* Two elements only */
if (ARR(L) > ARR(R)) {
BOTH_SWAP(L, R);
}
return;
}
/* Use median of three for pivot choice */
P=(L+R)>>1;
BOTH_SWAP(P, L+1);
if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }
i = L+1;
j = R;
piv = ARR(L);
pid = IDX(L);
do {
do i++; while(ARR(i) < piv);
do j--; while(ARR(j) > piv);
if (j < i)
break;
BOTH_SWAP(i, j);
} while(1);
BOTH_SWAP(L, j);
/* Re-set active partition */
if (j <= k) L=i;
if (j >= k) R=j-1;
} while(1);
}
#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP
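/* mode: for each slice along `dimension`, sort a copy of the slice together
with its original indices, scan for the longest run of equal values, and
return that value plus one of its original indices. */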
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
THTensor *temp_;
THLongTensor *tempi_;
real *temp__data;
long *tempi__data;
long t_size_dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
t_size_dim = THTensor_(size)(t, dimension);
temp_ = THTensor_(new)();
THTensor_(resize1d)(temp_, t_size_dim);
temp__data = THTensor_(data)(temp_);
tempi_ = THLongTensor_new();
THLongTensor_resize1d(tempi_, t_size_dim);
tempi__data = THLongTensor_data(tempi_);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long i;
real mode = 0;
long modei = 0;
long temp_freq = 0;
long max_freq = 0;
for(i = 0; i < t_size_dim; i++)
temp__data[i] = t_data[i*t_stride];
for(i = 0; i < t_size_dim; i++)
tempi__data[i] = i;
THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1);
for(i = 0; i < t_size_dim; i++)
{
temp_freq++;
if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1]))
{
if (temp_freq > max_freq)
{
mode = temp__data[i];
modei = tempi__data[i];
max_freq = temp_freq;
}
temp_freq = 0;
}
}
*values__data = mode;
*indices__data = modei;);
THTensor_(free)(temp_);
THLongTensor_free(tempi_);
}
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension)
{
THLongStorage *dim;
THTensor *temp_;
THLongTensor *tempi_;
real *temp__data;
long *tempi__data;
long t_size_dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
t_size_dim = THTensor_(size)(t, dimension);
temp_ = THTensor_(new)();
THTensor_(resize1d)(temp_, t_size_dim);
temp__data = THTensor_(data)(temp_);
tempi_ = THLongTensor_new();
THLongTensor_resize1d(tempi_, t_size_dim);
tempi__data = THLongTensor_data(tempi_);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long i;
for(i = 0; i < t_size_dim; i++)
temp__data[i] = t_data[i*t_stride];
for(i = 0; i < t_size_dim; i++)
tempi__data[i] = i;
THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1);
*values__data = temp__data[k-1];
*indices__data = tempi__data[k-1];);
THTensor_(free)(temp_);
THLongTensor_free(tempi_);
}
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
long t_size_dim, k;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
t_size_dim = THTensor_(size)(t, dimension);
k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */
THTensor_(kthvalue)(values_, indices_, t, k+1, dimension);
}
void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, long k, int dim, int dir, int sorted)
{
int numDims = THTensor_(nDimension)(t);
THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range");
long sliceSize = THTensor_(size)(t, dim);
THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension");
THTensor *tmpResults = THTensor_(new)();
THTensor_(resize1d)(tmpResults, sliceSize);
real *tmp__data = THTensor_(data)(tmpResults);
THLongTensor *tmpIndices = THLongTensor_new();
THLongTensor_resize1d(tmpIndices, sliceSize);
long *tmpi__data = THLongTensor_data(tmpIndices);
THLongStorage *topKSize = THTensor_(newSizeOf)(t);
THLongStorage_set(topKSize, dim, k);
THTensor_(resize)(rt_, topKSize, NULL);
THLongTensor_resize(ri_, topKSize, NULL);
THLongStorage_free(topKSize);
if (dir) {
/* k largest elements, descending order (optional: see sorted) */
long K = sliceSize - k;
TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim,
long i;
for(i = 0; i < sliceSize; i++)
{
tmp__data[i] = t_data[i*t_stride];
tmpi__data[i] = i;
}
if (K > 0)
THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1);
if (sorted)
THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1);
for(i = 0; i < k; i++)
{
rt__data[i*rt__stride] = tmp__data[i + K];
ri__data[i*ri__stride] = tmpi__data[i + K];
})
}
else {
/* k smallest elements, ascending order (optional: see sorted) */
TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim,
long i;
for(i = 0; i < sliceSize; i++)
{
tmp__data[i] = t_data[i*t_stride];
tmpi__data[i] = i;
}
THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1);
if (sorted)
THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1);
for(i = 0; i < k; i++)
{
rt__data[i*rt__stride] = tmp__data[i];
ri__data[i*ri__stride] = tmpi__data[i];
})
}
THTensor_(free)(tmpResults);
THLongTensor_free(tmpIndices);
}
void THTensor_(tril)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
long sz = THMin(r+k+1, t_size_1);
for(c = THMax(0, r+k+1); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
}
}
void THTensor_(triu)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
long sz = THMin(r+k, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
}
}
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
THTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THTensor_(catArray)(r_, inputs, 2, dimension);
}
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j;
long offset;
int ndim = dimension + 1;
for (i = 0; i < numInputs; i++)
{
ndim = THMax(ndim, inputs[i]->nDimension);
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(ndim);
for(i = 0; i < ndim; i++)
{
long dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : 1;
if (i == dimension)
{
for (j = 1; j < numInputs; j++)
{
dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : 1;
}
}
else
{
for (j = 1; j < numInputs; j++)
{
if (dimSize != (i < inputs[j]->nDimension ? inputs[j]->size[i] : 1))
{
THLongStorage_free(size);
THError("inconsistent tensor sizes");
}
}
}
size->data[i] = dimSize;
}
THTensor_(resize)(result, size, NULL);
THLongStorage_free(size);
offset = 0;
for (j = 0; j < numInputs; j++)
{
long dimSize = dimension < inputs[j]->nDimension ? inputs[j]->size[dimension] : 1;
THTensor *nt = THTensor_(newWithTensor)(result);
THTensor_(narrow)(nt, NULL, dimension, offset, dimSize);
THTensor_(copy)(nt, inputs[j]);
THTensor_(free)(nt);
offset += dimSize;
}
}
int THTensor_(equal)(THTensor *ta, THTensor* tb)
{
int equal = 1;
if(!THTensor_(isSameSizeAs)(ta, tb))
return 0;
if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) {
real *tap = THTensor_(data)(ta);
real *tbp = THTensor_(data)(tb);
long sz = THTensor_(nElement)(ta);
long i;
for (i=0; i<sz; ++i){
if(tap[i] != tbp[i]) return 0;
}
} else {
// Short-circuit the apply function on inequality
TH_TENSOR_APPLY2(real, ta, real, tb,
if (equal && *ta_data != *tb_data) {
equal = 0;
TH_TENSOR_APPLY_hasFinished = 1; break;
})
}
return equal;
}
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
{ \
THByteTensor_rawResize(r_, t->nDimension, t->size, NULL); \
TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
*r__data = (*t_data OP value) ? 1 : 0;); \
} \
void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
{ \
THTensor_(rawResize)(r_, t->nDimension, t->size, NULL); \
TH_TENSOR_APPLY2(real, r_, real, t, \
*r__data = (*t_data OP value) ? 1 : 0;); \
} \
void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THByteTensor_rawResize(r_, ta->nDimension, ta->size, NULL); \
TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
*r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
} \
void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THTensor_(rawResize)(r_, ta->nDimension, ta->size, NULL); \
TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
*r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
}
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
}
#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
}
#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
#endif /* long only part */
#if defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
#endif /* int only part */
#if defined(TH_REAL_IS_BYTE)
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
int THTensor_(NAME)(THTensor *tensor) \
{ \
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \
int sum = INIT_VALUE; \
TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \
return sum; \
}
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0)
#endif /* Byte only part */
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
LAB_IMPLEMENT_BASIC_FUNCTION(log,log)
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p)
LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_sigmoid)
LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp)
LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos)
LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos)
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh)
LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin)
LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin)
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh)
LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_rsqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil)
LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor)
LAB_IMPLEMENT_BASIC_FUNCTION(round,round)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs)
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,trunc)
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_frac)
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, 1.0 / )
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data,*ty_data););
}
void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
THTensor_(resizeAs)(r_, a);
TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_lerp(*a_data, *b_data, weight););
}
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum/t_size;);
}
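/* std/var along `dimension`: flag != 0 selects the biased estimator (divide
by n), flag == 0 the unbiased one (divide by n-1, Bessel's correction). */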
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
});
}
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = sum2;
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sum2;
});
}
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
if(value == 0) {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride] != 0.0;
*r__data = sum;)
} else {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += pow(fabs(t_data[i*t_stride]), value);
*r__data = pow(sum, 1.0/value);)
}
}
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else if(value == 1) {
TH_TENSOR_APPLY(real, tensor, sum += fabs(*tensor_data););
return sum;
} else if(value == 2) {
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
return sqrt(sum);
} else {
TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value););
return pow(sum, 1.0/value);
}
}
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
int i;
THTensor *rowR, *rowS;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
THTensor_(nDimension)(src));
rowR = THTensor_(new)();
rowS = THTensor_(new)();
THTensor_(resizeAs)(res, src);
for (i=0; i<src->size[dimension]; i++)
{
real norm = 0;
real new_norm;
THTensor_(select)(rowS, src, dimension, i);
THTensor_(select)(rowR, res, dimension, i);
if (value == 1) {
TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
} else if (value == 2) {
TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
} else {
TH_TENSOR_APPLY(real, rowS, norm += pow(fabs(*rowS_data), value););
}
norm = pow(norm, 1/value);
if (norm > maxnorm)
{
new_norm = maxnorm / (norm + 1e-7);
TH_TENSOR_APPLY2(
real, rowR, real, rowS,
*rowR_data = (*rowS_data) * new_norm;
)
}
else
THTensor_(copy)(rowR, rowS);
}
THTensor_(free)(rowR);
THTensor_(free)(rowS);
}
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
real sum = 0;
TH_TENSOR_APPLY2(real, tensor, real, src,
sum += pow(fabs(*tensor_data - *src_data), value);)
return pow(sum, 1.0/value);
}
accreal THTensor_(meanall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}
accreal THTensor_(varall)(THTensor *tensor)
{
accreal mean = THTensor_(meanall)(tensor);
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
sum /= (THTensor_(nElement)(tensor)-1);
return sum;
}
accreal THTensor_(stdall)(THTensor *tensor)
{
return sqrt(THTensor_(varall)(tensor));
}
void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = a;
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + i*(b-a)/((real)(n-1));
i++;
);
}
}
void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a);
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, _generator, 0, 1);
}
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, _generator, 0, 1);
}
void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
real minval;
real maxval;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data >= minval && *tensor_data <= maxval) {
const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins);
h_data[THMin(bin, nbins-1)] += 1;
}
);
}
#endif /* floating point only part */
#endif
|
dds.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
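/*
DDSInfo and DDSPixelFormat mirror the fields of the on-disk DDS surface
header (DDSURFACEDESC2 and its embedded DDPIXELFORMAT) that ReadDDSInfo
parses before a decoder is selected.
*/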
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *);
typedef MagickBooleanType
DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
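/*
Single-colour lookup tables: for each 8-bit channel value they list candidate
(start, end) endpoint pairs -- in 5-bit or 6-bit precision -- together with the
rounding error, so a block whose pixels all share one colour can be encoded
with the best-matching endpoints.
*/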
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
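/*
  The C565_red/green/blue macros expand the 5- or 6-bit channels of a packed
  5:6:5 color to 8 bits by replicating the high bits into the low bits.
*/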
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if ((ssize_t) max - min < steps) \
max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
min = MagickMax(0, (ssize_t) max - steps)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
const MagickBooleanType,ExceptionInfo *),
ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
const MagickBooleanType,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
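/*
  Expand the two 5:6:5 endpoint colors of a DXT block to 8-bit RGB and derive
  the remaining palette entries.  When ignoreAlpha is MagickFalse and c0 <= c1
  the block uses the DXT1 three-color mode: the third entry is the midpoint
  and the fourth entry is marked transparent.
*/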
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
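/*
  Quantize the 16 block alphas against the 5- or 7-step DXT5 alpha ramp built
  from min and max, store the nearest code for each texel in indices (texels
  outside the image, marked with -1, get code 0) and return the accumulated
  squared error.
*/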
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
size_t
dist;
dist = value - (size_t)codes[j];
dist *= dist;
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
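/*
  Cluster fit for a 4x4 color block: the points are ordered along the
  principal axis and every split of that ordering into the four DXT index
  groups is evaluated with a least-squares fit; the endpoints giving the
  lowest metric-weighted error are kept.
*/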
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
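/*
  Range fit for a 4x4 color block: project the points onto the principal
  axis, take the extremes as endpoints, snap them to the 5:6:5 grid and
  assign each point the nearest of the four interpolated codes.
*/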
static void CompressRangeFit(const size_t count,
const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
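/*
  Find the best 5:6:5 endpoints for a single flat color using the precomputed
  DDS_LOOKUP tables, together with the palette index (0 or 2) that reproduces
  it with the smallest error.
*/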
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
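/*
  Estimate the principal component of the symmetric 3x3 covariance matrix
  with a few rounds of power iteration, rescaling by the largest component
  after each step.
*/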
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
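/*
  Compute the weight-adjusted covariance matrix (upper triangle, six entries)
  of the block colors around their weighted centroid.
*/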
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
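/*
  Sort the block colors by their projection onto the given axis and record
  the ordering for this iteration; returns MagickFalse when the ordering
  repeats one from an earlier iteration.  The weighted points and their sum
  are stored for use by the cluster fit.
*/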
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
const char
*option;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
Image
*image;
MagickBooleanType
status,
cubemap,
volume,
read_mipmaps;
PixelTrait
alpha_trait;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse;
  volume=MagickFalse;
  read_mipmaps=MagickFalse;
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
alpha_trait = BlendPixelTrait;
decoder = ReadUncompressedRGBA;
}
else
{
alpha_trait = UndefinedPixelTrait;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Not sure how to handle this */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
alpha_trait = UndefinedPixelTrait;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
alpha_trait = UndefinedPixelTrait;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
alpha_trait = BlendPixelTrait;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
alpha_trait = BlendPixelTrait;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
if ((num_images == 0) || (num_images > GetBlobSize(image)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
option=GetImageOption(image_info,"dds:skip-mipmaps");
if (IsStringFalse(option) != MagickFalse)
read_mipmaps=MagickTrue;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
/* Start a new image */
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->alpha_trait=alpha_trait;
image->compression=compression;
image->columns=dds_info.width;
image->rows=dds_info.height;
image->storage_class=DirectClass;
image->endian=LSBEndian;
image->depth=8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
(void) SetImageBackgroundColor(image,exception);
status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
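/*
  Write a decoded 4x4 DXT1 block into the pixel cache.  Returns MagickFalse
  when a transparent texel is found in an image without an alpha channel so
  the caller can enable alpha and decode the block again.
*/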
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
DDSColors colors,size_t bits,Quantum *q)
{
register ssize_t
i;
ssize_t
j;
unsigned char
code;
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) image->columns &&
(y + j) < (ssize_t) image->rows)
{
code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
if ((colors.a[code] != 0) &&
(image->alpha_trait == UndefinedPixelTrait))
return(MagickFalse);
q+=GetPixelChannels(image);
}
}
}
return(MagickTrue);
}
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
MagickBooleanType
status;
  /*
    Only read mipmaps for textures and cube maps
  */
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
status=MagickTrue;
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
register ssize_t
i;
size_t
h,
w;
w=DIV2(dds_info->width);
h=DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
AcquireNextImage(image_info,image,exception);
if (image->next == (Image *) NULL)
return(MagickFalse);
image->next->alpha_trait=image->alpha_trait;
image=SyncNextImageInList(image);
status=SetImageExtent(image,w,h,exception);
if (status == MagickFalse)
break;
status=decoder(image,dds_info,exception);
if (status == MagickFalse)
break;
if ((w == 1) && (h == 1))
break;
w=DIV2(w);
h=DIV2(h);
}
}
return(status);
}
static MagickBooleanType ReadDXT1Pixels(Image *image,
DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
DDSColors
colors;
register Quantum
*q;
register ssize_t
x;
size_t
bits;
ssize_t
y;
unsigned short
c0,
c1;
magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
MagickMin(4,image->rows-y),exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
/* Read 8 bytes of data from the image */
c0=ReadBlobLSBShort(image);
c1=ReadBlobLSBShort(image);
bits=ReadBlobLSBLong(image);
CalculateColors(c0,c1,&colors,MagickFalse);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
/* Write the pixels */
if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
{
/* Correct alpha */
SetImageAlpha(image,QuantumRange,exception);
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
MagickMin(4,image->rows-y),exception);
if (q != (Quantum *) NULL)
SetDXT1Pixels(image,x,y,colors,bits,q);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
}
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,8,exception));
}
static MagickBooleanType ReadDXT3Pixels(Image *image,
DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
DDSColors
colors;
register Quantum
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
ssize_t
j,
y;
unsigned short
c0,
c1;
magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
MagickMin(4, image->rows - y),exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
/* Read alpha values (8 bytes) */
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
q+=GetPixelChannels(image);
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
}
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadDXT5Pixels(Image *image,
DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
DDSColors
colors;
MagickSizeType
alpha_bits;
register Quantum
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
ssize_t
j,
y;
unsigned short
c0,
c1;
magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
MagickMin(4, image->rows - y),exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
/* Read alpha values (8 bytes) */
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) image->columns &&
(y + j) < (ssize_t) image->rows)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
/* Extract alpha value */
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
q+=GetPixelChannels(image);
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
}
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
DDSInfo *dds_info,ExceptionInfo *exception)
{
register Quantum
*q;
ssize_t
x, y;
unsigned short
color;
for (y = 0; y < (ssize_t) image->rows; y++)
{
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
}
else
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType,exception);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
exception));
else
return(SkipRGBMipmaps(image,dds_info,3,exception));
}
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
DDSInfo *dds_info,ExceptionInfo *exception)
{
register Quantum
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleAlphaType,exception);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) image->rows; y++)
{
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
}
else if (alphaBits == 2)
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
(color >> 8)),q);
SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
}
else
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
}
}
else
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
exception));
else
return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
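/*
  Expand the per-unique-color indices back to all 16 texels of a block using
  the block map; texels that were skipped (map entry of -1) get code 3.
*/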
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
int texel_size,ExceptionInfo *exception)
{
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
w=DIV2(dds_info->width);
h=DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w=DIV2(w);
h=DIV2(h);
if ((w == 1) && (h == 1))
break;
}
}
return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
int pixel_size,ExceptionInfo *exception)
{
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
w=DIV2(dds_info->width);
h=DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset=(MagickOffsetType)w*h*pixel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w=DIV2(w);
h=DIV2(h);
if ((w == 1) && (h == 1))
break;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
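/*
  Write the DXT5 alpha block: compress against both the 5-step and 7-step
  ramps, keep whichever has the lower error (remapping the codes and swapping
  the endpoints when the 7-step ramp wins) and emit the two endpoint bytes
  followed by the 16 packed 3-bit indices.
*/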
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
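/*
  Compress one 4x4 color block: derive the principal axis from the weighted
  covariance and use either the fast range fit or the higher-quality cluster
  fit to choose the endpoints and indices.
*/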
static void WriteCompressed(Image *image, const size_t count,
DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if ((clusterFit == MagickFalse) || (count == 0))
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file, by default using
%  DXT5 compression (DXT1 or uncompressed output can also be selected).
%
%  The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image, ExceptionInfo *exception)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
fromlist,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace,exception);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (image->alpha_trait == UndefinedPixelTrait)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
if (image_info->compression == DXT1Compression)
compression=FOURCC_DXT1;
else if (image_info->compression == NoCompression)
pixelFormat=DDPF_RGB;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (IsStringTrue(option) != MagickFalse)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (IsStringTrue(option) != MagickFalse)
weightByAlpha=MagickTrue;
}
}
}
mipmaps=0;
fromlist=MagickFalse;
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
{
if (LocaleNCompare(option,"fromlist",8) == 0)
{
Image
*next;
fromlist=MagickTrue;
next=image->next;
while(next != (Image *) NULL)
{
mipmaps++;
next=next->next;
}
}
}
if ((mipmaps == 0) &&
((image->columns & (image->columns - 1)) == 0) &&
((image->rows & (image->rows - 1)) == 0))
{
maxMipmaps=SIZE_MAX;
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
option=GetImageOption(image_info,"dds:raw");
if (IsStringTrue(option) == MagickFalse)
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
else
mipmaps=0;
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
exception);
if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MagickPathExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (format == DDPF_FOURCC)
flags=flags | DDSD_LINEARSIZE;
else
flags=flags | DDSD_PITCH;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (pixelFormat == DDPF_FOURCC)
{
/* Compressed DDS requires linear compressed size of first image */
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
else /* DXT5 */
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
}
else
{
/* Uncompressed DDS requires byte pitch of first image */
if (image->alpha_trait != UndefinedPixelTrait)
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
else
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
}
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) memset(software,0,sizeof(software));
(void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for(i=0;i < 5;i++) /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
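/*
  Encode the image as DXT1 or DXT5 blocks: gather each 4x4 tile, collect its
  distinct colors with alpha-derived weights, write the alpha block for DXT5
  and then the color block.
*/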
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
register const Quantum
*p;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const Quantum *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
else
alpha = 255;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
alphas[4*by + bx] = (size_t)alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p+=GetPixelChannels(image);
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression,const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  /* Round to nearest and clamp to the range [0, limit] */
  ssize_t
    result = (ssize_t) (value + 0.5f);
  if (result < 0)
    return(0);
  if ((size_t) result > limit)
    return(limit);
  return((size_t) result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
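/*
  Write the DXT color block: the two 5:6:5 endpoints (larger value first so
  the four-color mode is selected) followed by the 16 packed 2-bit indices,
  which are remapped when the endpoints have to be swapped.
*/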
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char *indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
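/*
  Write the requested number of mipmap levels, either by repeatedly resizing
  the main image (reusing the previous level when dds:fast-mipmaps is set) or
  by taking the levels from the remaining images in the list.
*/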
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
const size_t pixelFormat,const size_t compression,const size_t mipmaps,
const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
const char
*option;
Image
*mipmap_image,
*resize_image;
MagickBooleanType
fast_mipmaps,
status;
register ssize_t
i;
size_t
columns,
rows;
columns=DIV2(image->columns);
rows=DIV2(image->rows);
option=GetImageOption(image_info,"dds:fast-mipmaps");
fast_mipmaps=IsStringTrue(option);
mipmap_image=image;
resize_image=image;
status=MagickTrue;
for (i=0; i < (ssize_t) mipmaps; i++)
{
if (fromlist == MagickFalse)
{
mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
exception);
if (mipmap_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
}
else
{
mipmap_image=mipmap_image->next;
if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
image->filename);
}
DestroyBlob(mipmap_image);
mipmap_image->blob=ReferenceBlob(image->blob);
WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
weightByAlpha,exception);
if (fromlist == MagickFalse)
{
if (fast_mipmaps == MagickFalse)
mipmap_image=DestroyImage(mipmap_image);
else
{
if (resize_image != image)
resize_image=DestroyImage(resize_image);
resize_image=mipmap_image;
}
}
columns=DIV2(columns);
rows=DIV2(rows);
}
if (resize_image != image)
resize_image=DestroyImage(resize_image);
return(status);
}
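// encodes a block whose 16 pixels share a single colour: look up optimal 5:6:5
// endpoints for that colour via ComputeEndPoints()/DDS_LOOKUP, give every pixel the
// same palette index, remap through the block's pixel map and emit the block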
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
const ssize_t *map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const Quantum
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p)));
if (image->alpha_trait != UndefinedPixelTrait)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p)));
p+=GetPixelChannels(image);
}
}
}
|
utils.h
|
#include "structures.h"
#ifndef UTILSH
#define UTILSH
namespace utils
{
static void print_help_message()
{
std::cout << "Please provide an obj filename using -f <filename.obj>" << std::endl;
std::cout << "Optional: -of specifies outfile name" << std::endl;
std::cout << "Optional: -co specifies cost threshold (=infinite)" << std::endl;
std::cout << "Optional: -ct specifies threshold for grid chart splitting by normal variance (=0.001)" << std::endl;
std::cout << "Optional: -debug writes charts to obj file, as colours that override texture coordinates (=false)" << std::endl;
std::cout << "Optional: -tkd num triangles per kdtree node (default: 32000)" << std::endl;
std::cout << "Optional: -tbvh num triangles per kdtree node (default: 8192)" << std::endl;
std::cout << "Optional: -single-max: specifies largest possible single output texture (=4096)" << std::endl;
std::cout << "Optional: -multi-max: specifies largest possible output texture (=8192)" << std::endl;
}
static void initialize_glut_window(int argc, char** argv, cmd_options& opt)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(opt.single_tex_limit, opt.single_tex_limit);
glutInitWindowPosition(64, 64);
glutCreateWindow(argv[0]);
glutSetWindowTitle(STRING_APP_NAME.c_str());
glEnable(GL_DEPTH_TEST);
glEnable(GL_TEXTURE_2D);
glDisable(GL_CULL_FACE);
glewExperimental = GL_TRUE;
glewInit();
glutHideWindow();
}
static char* getCmdOption(char** begin, char** end, const std::string& option)
{
char** itr = std::find(begin, end, option);
if(itr != end && ++itr != end)
{
return *itr;
}
return 0;
}
static bool cmdOptionExists(char** begin, char** end, const std::string& option) { return std::find(begin, end, option) != end; }
// parses a face string like "2//1 8//1 4//1" (the token list after the "f" keyword) into the 3 given arrays
static void parse_face_string(std::string face_string, int (&index)[3], int (&coord)[3], int (&normal)[3])
{
// split by space into faces
std::vector<std::string> faces;
boost::algorithm::trim(face_string);
boost::algorithm::split(faces, face_string, boost::algorithm::is_any_of(" "), boost::algorithm::token_compress_on);
for(int i = 0; i < 3; ++i)
{
// split by / for indices
std::vector<std::string> inds;
boost::algorithm::split(inds, faces[i], [](char c) { return c == '/'; }, boost::algorithm::token_compress_off);
for(int j = 0; j < (int)inds.size(); ++j)
{
int idx = 0;
// parse value from string
if(inds[j] != "")
{
idx = stoi(inds[j]);
}
if(j == 0)
{
index[i] = idx;
}
else if(j == 1)
{
coord[i] = idx;
}
else if(j == 2)
{
normal[i] = idx;
}
}
}
}
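// illustrative example: for the face token "2//1" the loop yields index = 2, coord = 0
// (empty middle field) and normal = 1; so "2//1 8//1 4//1" fills index = {2,8,4},
// coord = {0,0,0} and normal = {1,1,1}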
static std::shared_ptr<texture_t> load_image(const std::string& filepath)
{
std::cout << "Loading image file " << filepath << std::endl;
std::vector<unsigned char> img;
unsigned int width = 0;
unsigned int height = 0;
int tex_error = lodepng::decode(img, width, height, filepath);
if(tex_error)
{
std::cout << "ERROR: unable to load image file (not a .png?) " << filepath << std::endl;
exit(1);
}
else
{
std::cout << "Loaded texture from " << filepath << std::endl;
}
auto texture = std::make_shared<texture_t>(width, height, GL_LINEAR);
texture->set_pixels(&img[0]);
return texture;
}
static void save_image(std::string& filename, std::vector<uint8_t>& image, int width, int height)
{
unsigned int tex_error = lodepng::encode(filename, image, width, height);
if(tex_error)
{
std::cerr << "ERROR: unable to save image file " << filename << std::endl;
std::cerr << tex_error << ": " << lodepng_error_text(tex_error) << std::endl;
}
std::cout << "Saved image to " << filename << std::endl;
}
static void save_framebuffer_to_image(std::string filename, std::shared_ptr<frame_buffer_t> frame_buffer)
{
std::vector<uint8_t> pixels;
frame_buffer->get_pixels(0, pixels);
int tex_error = lodepng::encode(filename, pixels, frame_buffer->get_width(), frame_buffer->get_height());
if(tex_error)
{
std::cout << "ERROR: unable to save image file " << filename << std::endl;
exit(1);
}
std::cout << "Framebuffer written to image " << filename << std::endl;
}
#ifdef ADHOC_PARSER
// load obj function from vt_obj_loader/utils.h
static BoundingBoxLimits
load_obj(const std::string& filename, std::vector<double>& v, std::vector<int>& vindices, std::vector<double>& t, std::vector<int>& tindices, std::vector<std::string>& materials)
{
scm::math::vec3f min_pos(std::numeric_limits<float>::max(), std::numeric_limits<float>::max(), std::numeric_limits<float>::max());
scm::math::vec3f max_pos(std::numeric_limits<float>::lowest(), std::numeric_limits<float>::lowest(), std::numeric_limits<float>::lowest());
FILE* file = fopen(filename.c_str(), "r");
if(0 != file)
{
std::string current_material = "";
while(true)
{
char line[128];
int32_t l = fscanf(file, "%s", line);
if(l == EOF)
break;
if(strcmp(line, "usemtl") == 0)
{
char name[128];
fscanf(file, "%s\n", name);
current_material = std::string(name);
current_material.erase(std::remove(current_material.begin(), current_material.end(), '\n'), current_material.end());
current_material.erase(std::remove(current_material.begin(), current_material.end(), '\r'), current_material.end());
boost::trim(current_material);
std::cout << "obj switch material: " << current_material << std::endl;
}
else if(strcmp(line, "v") == 0)
{
double vx, vy, vz;
fscanf(file, "%lf %lf %lf\n", &vx, &vy, &vz);
// scaling for zebra
// vx *= 0.01;
// vy *= 0.01;
// vz *= 0.01;
v.insert(v.end(), {vx, vy, vz});
// compare to find bounding box limits
if(vx > max_pos.x)
{
max_pos.x = vx;
}
else if(vx < min_pos.x)
{
min_pos.x = vx;
}
if(vy > max_pos.y)
{
max_pos.y = vy;
}
else if(vy < min_pos.y)
{
min_pos.y = vy;
}
if(vz > max_pos.z)
{
max_pos.z = vz;
}
else if(vz < min_pos.z)
{
min_pos.z = vz;
}
}
// else if (strcmp(line, "vn") == 0) {
// float nx, ny, nz;
// fscanf(file, "%f %f %f\n", &nx, &ny, &nz);
// n.insert(n.end(), {nx,ny, nz});
// }
else if(strcmp(line, "vt") == 0)
{
double tx, ty;
fscanf(file, "%lf %lf\n", &tx, &ty);
t.insert(t.end(), {tx, ty});
}
else if(strcmp(line, "f") == 0)
{
fgets(line, 128, file);
std::string face_string = line;
int index[3];
int coord[3];
int normal[3];
parse_face_string(face_string, index, coord, normal);
// here all indices are decremented by 1 to fit 0 indexing schemes
vindices.insert(vindices.end(), {index[0] - 1, index[1] - 1, index[2] - 1});
tindices.insert(tindices.end(), {coord[0] - 1, coord[1] - 1, coord[2] - 1});
// nindices.insert(nindices.end(), {normal[0]-1, normal[1]-1, normal[2]-1});
materials.push_back(current_material);
}
}
fclose(file);
std::cout << "positions: " << v.size() / 3 << std::endl;
// std::cout << "normals: " << n.size()/3 << std::endl;
std::cout << "coords: " << t.size() / 2 << std::endl;
std::cout << "faces: " << vindices.size() / 3 << std::endl;
}
BoundingBoxLimits bbox;
bbox.min = min_pos;
bbox.max = max_pos;
return bbox;
}
static bool load_mtl(const std::string& mtl_filename, std::map<std::string, std::pair<std::string, int>>& material_map)
{
// parse .mtl file
std::cout << "loading .mtl file ..." << std::endl;
std::ifstream mtl_file(mtl_filename.c_str());
if(!mtl_file.is_open())
{
std::cout << "could not open .mtl file" << std::endl;
return false;
}
std::string current_material = "";
int material_index = 0;
std::string line;
while(std::getline(mtl_file, line))
{
boost::trim(line);
if(line.length() >= 2)
{
if(line[0] == '#')
{
continue;
}
if(line.substr(0, 6) == "newmtl")
{
current_material = line.substr(7);
boost::trim(current_material);
current_material.erase(std::remove(current_material.begin(), current_material.end(), '\n'), current_material.end());
current_material.erase(std::remove(current_material.begin(), current_material.end(), '\r'), current_material.end());
std::cout << "found: " << current_material << std::endl;
material_map[current_material] = std::make_pair("", -1);
}
else if(line.substr(0, 6) == "map_Kd")
{
std::string current_texture = line.substr(7);
current_texture.erase(std::remove(current_texture.begin(), current_texture.end(), '\n'), current_texture.end());
current_texture.erase(std::remove(current_texture.begin(), current_texture.end(), '\r'), current_texture.end());
boost::trim(current_texture);
std::cout << current_material << " -> " << current_texture << ", " << material_index << std::endl;
material_map[current_material] = std::make_pair(current_texture, material_index);
++material_index;
}
}
}
mtl_file.close();
return true;
}
#endif
// writes the texture id of each face of a polyhedron into a text file
static void write_tex_id_file(Polyhedron& P, std::string tex_file_name)
{
// write chart file
std::ofstream ocfs(tex_file_name);
for(Facet_iterator fi = P.facets_begin(); fi != P.facets_end(); ++fi)
{
ocfs << fi->tex_id << " ";
}
ocfs.close();
std::cout << "Texture id per face file written to: " << tex_file_name << std::endl;
}
// reads dimensions of png from header
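// assumes a well-formed PNG: after the 8-byte signature and the 8-byte IHDR chunk
// header, width and height are stored as big-endian 32-bit integers at byte offsets
// 16 and 20, hence the seekg(16) and be32toh() below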
static scm::math::vec2i get_png_dimensions(std::string filepath)
{
std::ifstream in(filepath, std::ios::binary);
uint32_t width, height;
in.seekg(16);
in.read((char*)&width, 4);
in.read((char*)&height, 4);
width = be32toh(width);
height = be32toh(height);
return scm::math::vec2i(width, height);
}
#ifdef VCG_PARSER
static void load_obj(const std::string& file, std::vector<indexed_triangle_t>& triangles, std::map<uint32_t, texture_info>& texture_info_map)
{
triangles.clear();
texture_info_map.clear();
CMesh m;
{
using namespace vcg::tri;
using namespace vcg::tri::io;
ImporterOBJ<CMesh>::Info oi;
bool mask_load_success = ImporterOBJ<CMesh>::LoadMask(file.c_str(), oi);
const int load_mask = oi.mask;
int load_error = ImporterOBJ<CMesh>::Open(m, file.c_str(), oi);
const int expected_mask = Mask::IOM_VERTCOORD | Mask::IOM_VERTTEXCOORD | Mask::IOM_WEDGTEXCOORD | Mask::IOM_VERTNORMAL;
if(!mask_load_success || load_error != ImporterOBJ<CMesh>::OBJError::E_NOERROR)
{
if(ImporterOBJ<CMesh>::ErrorCritical(load_error))
{
throw std::runtime_error("Failed to load the model: " + std::string(ImporterOBJ<CMesh>::ErrorMsg(load_error)));
}
else
{
std::cerr << std::string(ImporterOBJ<CMesh>::ErrorMsg(load_error)) << std::endl;
exit(1);
}
}
if(!(load_mask & expected_mask))
{
throw std::runtime_error("Mesh does not contain necessary components, mask of missing components: " + std::to_string(load_mask ^ expected_mask));
}
UpdateTopology<CMesh>::FaceFace(m);
UpdateTopology<CMesh>::VertexFace(m);
}
triangles.resize((size_t)m.FN());
for(size_t i = 0; i < m.FN(); i++)
{
auto v_0 = m.face[i].V(0);
auto v_1 = m.face[i].V(1);
auto v_2 = m.face[i].V(2);
memcpy(&triangles[i].v0_.pos_.data_array[0], &v_0->P()[0], sizeof(float) * 3);
memcpy(&triangles[i].v1_.pos_.data_array[0], &v_1->P()[0], sizeof(float) * 3);
memcpy(&triangles[i].v2_.pos_.data_array[0], &v_2->P()[0], sizeof(float) * 3);
memcpy(&triangles[i].v0_.nml_.data_array[0], &v_0->N()[0], sizeof(float) * 3);
memcpy(&triangles[i].v1_.nml_.data_array[0], &v_1->N()[0], sizeof(float) * 3);
memcpy(&triangles[i].v2_.nml_.data_array[0], &v_2->N()[0], sizeof(float) * 3);
triangles[i].v0_.tex_.x = m.face[i].WT(0).U();
triangles[i].v0_.tex_.y = m.face[i].WT(0).V();
triangles[i].v1_.tex_.x = m.face[i].WT(1).U();
triangles[i].v1_.tex_.y = m.face[i].WT(1).V();
triangles[i].v2_.tex_.x = m.face[i].WT(2).U();
triangles[i].v2_.tex_.y = m.face[i].WT(2).V();
triangles[i].tri_id_ = (uint32_t)i;
triangles[i].tex_idx_ = m.face[i].WT(0).n();
}
for(uint32_t i = 0; i < m.textures.size(); i++)
{
std::string texture_filename = m.textures[i];
if(texture_filename.size() > 3)
{
texture_filename = texture_filename.substr(0, texture_filename.size() - 3) + "png";
}
uint32_t material_id = i;
std::cout << "Material " << m.textures[i] << " : " << texture_filename << " : " << material_id << std::endl;
if(boost::filesystem::exists(texture_filename) && boost::algorithm::ends_with(texture_filename, ".png"))
{
texture_info_map[material_id] = {texture_filename, get_png_dimensions(texture_filename)};
}
else if(texture_filename == "")
{
// ok
}
else
{
std::cout << "ERROR: texture " << texture_filename << " was not found or is not a .png" << std::endl;
exit(1);
}
}
}
#endif
#ifdef ADHOC_PARSER
// load an .obj file and return all vertices, normals and coords interleaved
static void load_obj(const std::string& _file, std::vector<lamure::mesh::triangle_t>& triangles, std::vector<std::string>& materials)
{
triangles.clear();
std::vector<float> v;
std::vector<uint32_t> vindices;
std::vector<float> n;
std::vector<uint32_t> nindices;
std::vector<float> t;
std::vector<uint32_t> tindices;
uint32_t num_tris = 0;
FILE* file = fopen(_file.c_str(), "r");
std::string current_material = "";
if(0 != file)
{
while(true)
{
char line[128];
int32_t l = fscanf(file, "%s", line);
if(l == EOF)
break;
if(strcmp(line, "usemtl") == 0)
{
char name[128];
fscanf(file, "%s\n", name);
current_material = std::string(name);
current_material.erase(std::remove(current_material.begin(), current_material.end(), '\n'), current_material.end());
current_material.erase(std::remove(current_material.begin(), current_material.end(), '\r'), current_material.end());
boost::trim(current_material);
std::cout << "obj switch material: " << current_material << std::endl;
}
else if(strcmp(line, "v") == 0)
{
float vx, vy, vz;
fscanf(file, "%f %f %f\n", &vx, &vy, &vz);
v.insert(v.end(), {vx, vy, vz});
}
else if(strcmp(line, "vn") == 0)
{
float nx, ny, nz;
fscanf(file, "%f %f %f\n", &nx, &ny, &nz);
n.insert(n.end(), {nx, ny, nz});
}
else if(strcmp(line, "vt") == 0)
{
float tx, ty;
fscanf(file, "%f %f\n", &tx, &ty);
t.insert(t.end(), {tx, ty});
}
else if(strcmp(line, "f") == 0)
{
std::string vertex1, vertex2, vertex3;
uint32_t index[3];
uint32_t coord[3];
uint32_t normal[3];
fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n", &index[0], &coord[0], &normal[0], &index[1], &coord[1], &normal[1], &index[2], &coord[2], &normal[2]);
vindices.insert(vindices.end(), {index[0], index[1], index[2]});
tindices.insert(tindices.end(), {coord[0], coord[1], coord[2]});
nindices.insert(nindices.end(), {normal[0], normal[1], normal[2]});
materials.push_back(current_material);
}
}
fclose(file);
std::cout << "positions: " << vindices.size() << std::endl;
std::cout << "normals: " << nindices.size() << std::endl;
std::cout << "coords: " << tindices.size() << std::endl;
triangles.resize(nindices.size() / 3);
for(uint32_t i = 0; i < nindices.size() / 3; i++)
{
lamure::mesh::triangle_t tri;
for(uint32_t j = 0; j < 3; ++j)
{
scm::math::vec3f position(v[3 * (vindices[3 * i + j] - 1)], v[3 * (vindices[3 * i + j] - 1) + 1], v[3 * (vindices[3 * i + j] - 1) + 2]);
scm::math::vec3f normal(n[3 * (nindices[3 * i + j] - 1)], n[3 * (nindices[3 * i + j] - 1) + 1], n[3 * (nindices[3 * i + j] - 1) + 2]);
scm::math::vec2f coord(t[2 * (tindices[3 * i + j] - 1)], t[2 * (tindices[3 * i + j] - 1) + 1]);
switch(j)
{
case 0:
tri.v0_.pos_ = position;
tri.v0_.nml_ = normal;
tri.v0_.tex_ = coord;
break;
case 1:
tri.v1_.pos_ = position;
tri.v1_.nml_ = normal;
tri.v1_.tex_ = coord;
break;
case 2:
tri.v2_.pos_ = position;
tri.v2_.nml_ = normal;
tri.v2_.tex_ = coord;
break;
default:
break;
}
}
triangles[i] = tri;
}
}
else
{
std::cout << "failed to open file: " << _file << std::endl;
exit(1);
}
}
#endif
static void extract_cmd_options(int argc, char** argv, std::string& obj_filename, cmd_options& opt)
{
opt.out_filename = obj_filename.substr(0, obj_filename.size() - 4) + "_charts.obj";
if(cmdOptionExists(argv, argv + argc, "-of"))
{
opt.out_filename = getCmdOption(argv, argv + argc, "-of");
}
opt.want_raw_file = false;
if(cmdOptionExists(argv, argv + argc, "-raw"))
{
opt.want_raw_file = true;
}
opt.cost_threshold = std::numeric_limits<double>::max();
if(cmdOptionExists(argv, argv + argc, "-co"))
{
opt.cost_threshold = atof(getCmdOption(argv, argv + argc, "-co"));
}
opt.chart_threshold = std::numeric_limits<uint32_t>::max();
if(cmdOptionExists(argv, argv + argc, "-ch"))
{
opt.chart_threshold = atoi(getCmdOption(argv, argv + argc, "-ch"));
}
opt.e_fit_cf = 1.0;
if(cmdOptionExists(argv, argv + argc, "-ef"))
{
opt.e_fit_cf = atof(getCmdOption(argv, argv + argc, "-ef"));
}
opt.e_ori_cf = 1.0;
if(cmdOptionExists(argv, argv + argc, "-eo"))
{
opt.e_ori_cf = atof(getCmdOption(argv, argv + argc, "-eo"));
}
opt.e_shape_cf = 1.0;
if(cmdOptionExists(argv, argv + argc, "-es"))
{
opt.e_shape_cf = atof(getCmdOption(argv, argv + argc, "-es"));
}
opt.cst = 0.001;
if(cmdOptionExists(argv, argv + argc, "-ct"))
{
opt.cst = atof(getCmdOption(argv, argv + argc, "-ct"));
}
if(cmdOptionExists(argv, argv + argc, "-debug"))
{
opt.write_charts_as_textures = true;
}
opt.num_tris_per_node_kdtree = 1024 * 32;
if(cmdOptionExists(argv, argv + argc, "-tkd"))
{
opt.num_tris_per_node_kdtree = atoi(getCmdOption(argv, argv + argc, "-tkd"));
}
opt.num_tris_per_node_bvh = 8 * 1024;
if(cmdOptionExists(argv, argv + argc, "-tbvh"))
{
opt.num_tris_per_node_bvh = atoi(getCmdOption(argv, argv + argc, "-tbvh"));
}
opt.single_tex_limit = 4096;
if(cmdOptionExists(argv, argv + argc, "-single-max"))
{
opt.single_tex_limit = atoi(getCmdOption(argv, argv + argc, "-single-max"));
std::cout << "Single output texture limited to " << opt.single_tex_limit << std::endl;
}
opt.multi_tex_limit = 8192;
if(cmdOptionExists(argv, argv + argc, "-multi-max"))
{
opt.multi_tex_limit = atoi(getCmdOption(argv, argv + argc, "-multi-max"));
std::cout << "Multi output texture limited to " << opt.multi_tex_limit << std::endl;
}
}
static void initialize_nodes(app_state& state)
{
uint32_t first_leaf_id = state.kdtree->get_first_node_id_of_depth(state.kdtree->get_depth());
uint32_t num_leaf_ids = state.kdtree->get_length_of_depth(state.kdtree->get_depth());
for(uint32_t i = 0; i < num_leaf_ids; ++i)
{
state.node_ids.push_back(i + first_leaf_id);
}
}
static void reorder_triangles(app_state& state)
{
// here, we make sure that triangles is in the same ordering as the leaf level triangles
uint32_t first_leaf_id = state.bvh->get_first_node_id_of_depth(state.bvh->get_depth());
uint32_t num_leaf_ids = state.bvh->get_length_of_depth(state.bvh->get_depth());
state.triangles.clear();
for(uint32_t node_id = first_leaf_id; node_id < first_leaf_id + num_leaf_ids; ++node_id)
{
state.triangles.insert(state.triangles.end(), state.bvh->get_triangles(node_id).begin(), state.bvh->get_triangles(node_id).end());
}
std::cout << "Reordered " << state.triangles.size() << " triangles" << std::endl;
}
static void prepare_charts(app_state& state)
{
for(uint32_t tri_id = 0; tri_id < state.triangles.size(); ++tri_id)
{
const auto& tri = state.triangles[tri_id];
uint32_t area_id = tri.area_id;
uint32_t chart_id = tri.chart_id;
if(chart_id != (uint32_t)-1 && tri.get_area() > 0.f)
{
state.chart_map[area_id][chart_id].id_ = chart_id;
state.chart_map[area_id][chart_id].original_triangle_ids_.insert(tri_id);
}
}
for(uint32_t area_id = 0; area_id < state.num_areas; ++area_id)
{
// init charts
for(auto& it : state.chart_map[area_id])
{
it.second.rect_ = rectangle{scm::math::vec2f(std::numeric_limits<float>::max()), scm::math::vec2f(std::numeric_limits<float>::lowest()), it.first, false};
it.second.box_ = lamure::bounding_box(scm::math::vec3d(std::numeric_limits<float>::max()), scm::math::vec3d(std::numeric_limits<float>::lowest()));
}
}
}
static void expand_charts(app_state& state)
{
// grow chart boxes by all triangles in all levels of bvh
for(uint32_t node_id = 0; node_id < state.bvh->get_num_nodes(); node_id++)
{
const std::vector<lamure::mesh::Triangle_Chartid>& tris = state.bvh->get_triangles(node_id);
for(const auto& tri : tris)
{
state.chart_map[tri.area_id][tri.chart_id].box_.expand(scm::math::vec3d(tri.v0_.pos_));
state.chart_map[tri.area_id][tri.chart_id].box_.expand(scm::math::vec3d(tri.v1_.pos_));
state.chart_map[tri.area_id][tri.chart_id].box_.expand(scm::math::vec3d(tri.v2_.pos_));
}
}
}
static void chartify_parallel(app_state& state, cmd_options& opt)
{
int prev_percent = -1;
CLUSTER_SETTINGS cluster_settings(opt.e_fit_cf, opt.e_ori_cf, opt.e_shape_cf, opt.cst);
auto lambda_chartify = [&](uint64_t i, uint32_t id) -> void {
int percent = (int)(((float)i / (float)state.node_ids.size()) * 100.f);
if(percent != prev_percent)
{
prev_percent = percent;
std::cout << "Chartification: " << percent << " %" << std::endl;
}
// build polyhedron from node.begin to node.end in accordance with indices
uint32_t node_id = state.node_ids[i];
const auto& node = state.kdtree->get_nodes()[node_id];
const auto& indices = state.kdtree->get_indices();
BoundingBoxLimits limits;
limits.min = scm::math::vec3f(std::numeric_limits<float>::max());
limits.max = scm::math::vec3f(std::numeric_limits<float>::lowest());
std::vector<indexed_triangle_t> node_triangles;
node_triangles.resize(node.end_ - node.begin_);
// kept serial: this loop accumulates a min/max reduction into 'limits', and the
// enclosing lambda already runs in parallel via lamure::mesh::parallel_for
for(uint32_t idx = node.begin_; idx < node.end_; ++idx)
{
const auto& tri = state.all_indexed_triangles[indices[idx]];
node_triangles[idx - node.begin_] = tri;
limits.min.x = std::min(limits.min.x, tri.v0_.pos_.x);
limits.min.y = std::min(limits.min.y, tri.v0_.pos_.y);
limits.min.z = std::min(limits.min.z, tri.v0_.pos_.z);
limits.min.x = std::min(limits.min.x, tri.v1_.pos_.x);
limits.min.y = std::min(limits.min.y, tri.v1_.pos_.y);
limits.min.z = std::min(limits.min.z, tri.v1_.pos_.z);
limits.min.x = std::min(limits.min.x, tri.v2_.pos_.x);
limits.min.y = std::min(limits.min.y, tri.v2_.pos_.y);
limits.min.z = std::min(limits.min.z, tri.v2_.pos_.z);
limits.max.x = std::max(limits.max.x, tri.v0_.pos_.x);
limits.max.y = std::max(limits.max.y, tri.v0_.pos_.y);
limits.max.z = std::max(limits.max.z, tri.v0_.pos_.z);
limits.max.x = std::max(limits.max.x, tri.v1_.pos_.x);
limits.max.y = std::max(limits.max.y, tri.v1_.pos_.y);
limits.max.z = std::max(limits.max.z, tri.v1_.pos_.z);
limits.max.x = std::max(limits.max.x, tri.v2_.pos_.x);
limits.max.y = std::max(limits.max.y, tri.v2_.pos_.y);
limits.max.z = std::max(limits.max.z, tri.v2_.pos_.z);
}
Polyhedron polyMesh;
polyhedron_builder<HalfedgeDS> builder(node_triangles);
polyMesh.delegate(builder);
if(!CGAL::is_triangle_mesh(polyMesh))
{
std::cerr << "ERROR: Input geometry is not valid / not triangulated." << std::endl;
return;
}
// key: face_id, value: chart_id
std::map<uint32_t, uint32_t> chart_id_map;
uint32_t active_charts = ParallelClusterCreator::create_charts(chart_id_map, polyMesh, opt.cost_threshold, opt.chart_threshold, cluster_settings);
state.per_node_chart_id_map[node_id] = chart_id_map;
state.per_node_polyhedron[node_id] = polyMesh;
};
#ifdef PARALLEL_EXECUTION
uint32_t num_threads = std::min((size_t)24, state.node_ids.size());
lamure::mesh::parallel_for(num_threads, state.node_ids.size(), lambda_chartify);
#else
lamure::mesh::parallel_for(1, state.node_ids.size(), lambda_chartify);
#endif
}
static void convert_to_triangle_soup_parallel(app_state& state)
{
state.triangles.clear();
state.num_areas = 0;
for(auto& per_node_chart_id_map_it : state.per_node_chart_id_map)
{
uint32_t node_id = per_node_chart_id_map_it.first;
auto polyMesh = state.per_node_polyhedron[node_id];
// create index
typedef CGAL::Inverse_index<Polyhedron::Vertex_const_iterator> Index;
Index index(polyMesh.vertices_begin(), polyMesh.vertices_end());
uint32_t num_of_faces = polyMesh.size_of_facets();
// extract triangle soup
uint32_t offset = state.triangles.size();
state.triangles.resize(offset + num_of_faces);
#ifdef PARALLEL_EXECUTION
#pragma omp parallel for
#endif
for(uint32_t i = 0; i < num_of_faces; i++)
{
Polyhedron::Facet_const_iterator fi = polyMesh.facets_begin();
std::advance(fi, i);
Polyhedron::Halfedge_around_facet_const_circulator hc = fi->facet_begin();
if(circulator_size(hc) != 3)
{
std::cout << "ERROR: mesh corrupt!" << std::endl;
exit(1);
}
lamure::mesh::Triangle_Chartid tri;
Polyhedron::Vertex_const_iterator it = polyMesh.vertices_begin();
std::advance(it, index[Polyhedron::Vertex_const_iterator(hc->vertex())]);
tri.v0_.pos_ = scm::math::vec3f(it->point().x(), it->point().y(), it->point().z());
tri.v0_.tex_ = scm::math::vec2f(fi->t_coords[0].x(), fi->t_coords[0].y());
tri.v0_.nml_ = scm::math::vec3f(it->point().normal.x(), it->point().normal.y(), it->point().normal.z());
++hc;
it = polyMesh.vertices_begin();
std::advance(it, index[Polyhedron::Vertex_const_iterator(hc->vertex())]);
tri.v1_.pos_ = scm::math::vec3f(it->point().x(), it->point().y(), it->point().z());
tri.v1_.tex_ = scm::math::vec2f(fi->t_coords[1].x(), fi->t_coords[1].y());
tri.v1_.nml_ = scm::math::vec3f(it->point().normal.x(), it->point().normal.y(), it->point().normal.z());
++hc;
it = polyMesh.vertices_begin();
std::advance(it, index[Polyhedron::Vertex_const_iterator(hc->vertex())]);
tri.v2_.pos_ = scm::math::vec3f(it->point().x(), it->point().y(), it->point().z());
tri.v2_.tex_ = scm::math::vec2f(fi->t_coords[2].x(), fi->t_coords[2].y());
tri.v2_.nml_ = scm::math::vec3f(it->point().normal.x(), it->point().normal.y(), it->point().normal.z());
++hc;
tri.area_id = state.num_areas;
tri.chart_id = per_node_chart_id_map_it.second[fi->id()];
tri.tex_id = fi->tex_id;
tri.tri_id = fi->tri_id;
state.triangles[offset + i] = tri;
}
++state.num_areas;
}
}
static void assign_parallel(app_state& state)
{
std::vector<uint32_t> area_ids(state.num_areas);
for(uint32_t area_id = 0; area_id < state.num_areas; ++area_id)
{
area_ids[area_id] = area_id;
}
auto lambda_append = [&](uint64_t i, uint32_t id) -> void {
// compare all triangles with chart bounding boxes
uint32_t area_id = area_ids[i];
for(auto& it : state.chart_map[area_id])
{
// uint32_t chart_id = it.first;
auto& chart = it.second;
chart.all_triangle_ids_.insert(chart.original_triangle_ids_.begin(), chart.original_triangle_ids_.end());
// add any triangles that intersect chart
for(uint32_t tri_id = 0; tri_id < state.triangles.size(); ++tri_id)
{
const auto& tri = state.triangles[tri_id];
if(tri.chart_id != -1 && tri.get_area() > 0.f)
{
if(chart.box_.contains(scm::math::vec3d(tri.v0_.pos_)) || chart.box_.contains(scm::math::vec3d(tri.v1_.pos_)) || chart.box_.contains(scm::math::vec3d(tri.v2_.pos_)))
{
chart.all_triangle_ids_.insert(tri_id);
}
}
}
}
};
#ifdef PARALLEL_EXECUTION
uint32_t num_threads = std::min((size_t)24, area_ids.size());
lamure::mesh::parallel_for(num_threads, area_ids.size(), lambda_append);
#else
lamure::mesh::parallel_for(1, area_ids.size(), lambda_append);
#endif
}
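// two-level packing: first the charts of each area (one per processed kd-tree node) are
// packed into a rectangle per area, then those area rectangles are packed into the final
// atlas rectangle (state.image_rect); the packed offsets are applied to the chart texture
// coordinates afterwards in apply_texture_space_transformations_in_parallel()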
static void pack_areas(app_state& state)
{
state.area_rects.resize(state.num_areas);
#ifdef PARALLEL_EXECUTION
#pragma omp parallel for
#endif
for(uint32_t area_id = 0; area_id < state.num_areas; ++area_id)
{
calculate_chart_tex_space_sizes(state.chart_map[area_id], state.triangles, state.texture_info_map);
project_charts(state.chart_map[area_id], state.triangles);
std::cout << "Projected " << state.chart_map[area_id].size() << " charts for area " << area_id << std::endl;
std::cout << "Running rectangle packing for area " << area_id << std::endl;
// init the rectangles
std::vector<rectangle> rects;
for(auto& chart_it : state.chart_map[area_id])
{
chart& chart = chart_it.second;
if(chart.original_triangle_ids_.size() > 0)
{
rectangle rect = chart.rect_;
rect.max_ *= packing_scale;
rects.push_back(rect);
}
}
// rectangle packing
rectangle area_rect = pack(rects);
area_rect.id_ = area_id;
area_rect.flipped_ = false;
state.area_rects[area_id] = area_rect;
std::cout << "Packing of area " << area_id << " complete (" << area_rect.max_.x << ", " << area_rect.max_.y << ")" << std::endl;
// apply rectangles
for(const auto& rect : rects)
{
// std::cout << "Rectangle ID: " << rect.id_ << std::endl;
state.chart_map[area_id][rect.id_].rect_ = rect;
state.chart_map[area_id][rect.id_].projection.tex_space_rect = rect; // save for rendering from texture later on
}
}
std::cout << "Packing " << state.area_rects.size() << " areas..." << std::endl;
state.image_rect = pack(state.area_rects);
std::cout << "Packing of all areas complete (" << state.image_rect.max_.x << ", " << state.image_rect.max_.y << ")" << std::endl;
}
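// per chart, the locally parameterised coordinates are brought into atlas space: swap
// u and v when exactly one of the chart rectangle and its area rectangle was flipped
// during packing, scale by packing_scale, translate by the packed chart origin, and
// finally normalise by the overall atlas extent (image_rect.max_.x)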
static void apply_texture_space_transformations_in_parallel(app_state& state)
{
std::cout << "Applying texture space transformation..." << std::endl;
for(uint32_t k = 0; k < state.area_rects.size(); k++)
{
auto& area_rect = state.area_rects[k];
std::cout << "Area " << area_rect.id_ << " min: (" << area_rect.min_.x << ", " << area_rect.min_.y << ")" << std::endl;
std::cout << "Area " << area_rect.id_ << " max: (" << area_rect.max_.x << ", " << area_rect.max_.y << ")" << std::endl;
std::cout << "Chart map size " << state.chart_map[area_rect.id_].size() << std::endl;
std::vector<uint32_t> chart_ids(state.chart_map[area_rect.id_].size());
int i = 0;
for(const auto& pair : state.chart_map[area_rect.id_])
{
chart_ids[i] = pair.first;
i++;
//std::cout << "Chart: " << std::to_string(i) << ", id: " << std::to_string(chart_ids[i]) << std::endl;
}
// std::cout << "Chart ids initialized" << std::endl;
#ifdef PARALLEL_EXECUTION
#pragma omp parallel for
#endif
// next, apply the global transformation from area packing onto all individual chart rects per area
for(uint32_t j = 0; j < chart_ids.size(); j++)
{
uint32_t chart_id = chart_ids[j];
// std::cout << "Chart " << std::to_string(chart_id) << std::endl;
if(chart_id == (uint32_t)-1)
{
std::cerr << "Skipping chart with blank ID" << std::endl;
continue;
}
auto& chart = state.chart_map[area_rect.id_][chart_id];
chart.rect_.min_ += area_rect.min_;
if(chart.all_triangle_ids_.size() == 0)
{
std::cerr << "Skipping chart " << std::to_string(chart_ids[j]) << " with no triangles" << std::endl;
continue;
}
// std::cout << "Chart " << std::to_string(chart_ids[j]) << ", all triangle ids size " << chart.all_triangle_ids_.size() << std::endl;
std::vector<int> ids(chart.all_triangle_ids_.begin(), chart.all_triangle_ids_.end());
// apply this transformation to the new parameterization
for(uint32_t a = 0; a < ids.size(); a++)
{
int tri_id = ids[a];
// std::cout << "Triangle " << std::to_string(tri_id) << std::endl;
if((chart.rect_.flipped_ && !area_rect.flipped_) || (area_rect.flipped_ && !chart.rect_.flipped_))
{
float temp = chart.all_triangle_new_coods_[tri_id][0].x;
chart.all_triangle_new_coods_[tri_id][0].x = chart.all_triangle_new_coods_[tri_id][0].y;
chart.all_triangle_new_coods_[tri_id][0].y = temp;
temp = chart.all_triangle_new_coods_[tri_id][1].x;
chart.all_triangle_new_coods_[tri_id][1].x = chart.all_triangle_new_coods_[tri_id][1].y;
chart.all_triangle_new_coods_[tri_id][1].y = temp;
temp = chart.all_triangle_new_coods_[tri_id][2].x;
chart.all_triangle_new_coods_[tri_id][2].x = chart.all_triangle_new_coods_[tri_id][2].y;
chart.all_triangle_new_coods_[tri_id][2].y = temp;
}
for(uint32_t i = 0; i < 3; ++i)
{
chart.all_triangle_new_coods_[tri_id][i] *= packing_scale;
chart.all_triangle_new_coods_[tri_id][i].x += chart.rect_.min_.x;
chart.all_triangle_new_coods_[tri_id][i].y += chart.rect_.min_.y;
chart.all_triangle_new_coods_[tri_id][i].x /= state.image_rect.max_.x;
chart.all_triangle_new_coods_[tri_id][i].y /= state.image_rect.max_.x;
}
}
}
}
std::cout << "Done applying texture space transformation" << std::endl;
}
static void update_texture_coordinates(app_state& state)
{
// use 2D array to account for different textures (if no textures were found, make sure it has at least one row)
std::cout << "texture info map size: " << state.texture_info_map.size() << std::endl;
state.to_upload_per_texture.resize(state.texture_info_map.size());
// replacing texture coordinates in LOD file
//...and at the same time, we will fill the upload per texture list
std::cout << "Updating texture coordinates in leaf-level LOD nodes..." << std::endl;
uint32_t num_invalid_tris = 0;
uint32_t num_dropped_tris = 0;
uint32_t first_leaf_id = state.bvh->get_first_node_id_of_depth(state.bvh->get_depth());
uint32_t num_leaf_ids = state.bvh->get_length_of_depth(state.bvh->get_depth());
for(uint32_t node_id = first_leaf_id; node_id < first_leaf_id + num_leaf_ids; ++node_id)
{
auto& tris = state.bvh->get_triangles(node_id);
for(int local_tri_id = 0; local_tri_id < tris.size(); ++local_tri_id)
{
int32_t tri_id = ((node_id - first_leaf_id) * (state.bvh->get_primitives_per_node() / 3)) + local_tri_id;
auto& tri = tris[local_tri_id];
// std::cout << "tri id << " << tri.tri_id << " area id " << tri.area_id << " chart id " << tri.chart_id << std::endl;
if(tri.chart_id != -1 && tri.get_area() > 0.f)
{
auto& chart = state.chart_map[tri.area_id][tri.chart_id];
if(chart.all_triangle_ids_.find(tri_id) != chart.all_triangle_ids_.end())
{
// create per-texture render list
if(tri.tex_id != -1)
{
const auto& old_tri = state.all_indexed_triangles[tri.tri_id];
double epsilon = FLT_EPSILON;
// obtain original coordinates
// since indexed cgal polyhedra don't preserve texture coordinates correctly
if(scm::math::length(tri.v0_.pos_ - old_tri.v0_.pos_) < epsilon)
tri.v0_.tex_ = old_tri.v0_.tex_;
else if(scm::math::length(tri.v0_.pos_ - old_tri.v1_.pos_) < epsilon)
tri.v0_.tex_ = old_tri.v1_.tex_;
else if(scm::math::length(tri.v0_.pos_ - old_tri.v2_.pos_) < epsilon)
tri.v0_.tex_ = old_tri.v2_.tex_;
else
{
std::cout << "WARNING: tex coord v0 could not be disambiguated (" << (int)(tri.tri_id == old_tri.tri_id_) << ")" << std::endl;
}
if(scm::math::length(tri.v1_.pos_ - old_tri.v0_.pos_) < epsilon)
tri.v1_.tex_ = old_tri.v0_.tex_;
else if(scm::math::length(tri.v1_.pos_ - old_tri.v1_.pos_) < epsilon)
tri.v1_.tex_ = old_tri.v1_.tex_;
else if(scm::math::length(tri.v1_.pos_ - old_tri.v2_.pos_) < epsilon)
tri.v1_.tex_ = old_tri.v2_.tex_;
else
{
std::cout << "WARNING: tex coord v1 could not be disambiguated (" << (int)(tri.tri_id == old_tri.tri_id_) << ")" << std::endl;
}
if(scm::math::length(tri.v2_.pos_ - old_tri.v0_.pos_) < epsilon)
tri.v2_.tex_ = old_tri.v0_.tex_;
else if(scm::math::length(tri.v2_.pos_ - old_tri.v1_.pos_) < epsilon)
tri.v2_.tex_ = old_tri.v1_.tex_;
else if(scm::math::length(tri.v2_.pos_ - old_tri.v2_.pos_) < epsilon)
tri.v2_.tex_ = old_tri.v2_.tex_;
else
{
std::cout << "WARNING: tex coord v2 could not be disambiguated (" << (int)(tri.tri_id == old_tri.tri_id_) << ")" << std::endl;
}
state.to_upload_per_texture[tri.tex_id].push_back(blit_vertex_t{tri.v0_.tex_, chart.all_triangle_new_coods_[tri_id][0]});
state.to_upload_per_texture[tri.tex_id].push_back(blit_vertex_t{tri.v1_.tex_, chart.all_triangle_new_coods_[tri_id][1]});
state.to_upload_per_texture[tri.tex_id].push_back(blit_vertex_t{tri.v2_.tex_, chart.all_triangle_new_coods_[tri_id][2]});
}
else
{
++num_dropped_tris;
}
// override texture coordinates
tri.v0_.tex_ = chart.all_triangle_new_coods_[tri_id][0];
tri.v1_.tex_ = chart.all_triangle_new_coods_[tri_id][1];
tri.v2_.tex_ = chart.all_triangle_new_coods_[tri_id][2];
tri.v0_.tex_.y = 1.0 - tri.v0_.tex_.y; // flip y coord
tri.v1_.tex_.y = 1.0 - tri.v1_.tex_.y;
tri.v2_.tex_.y = 1.0 - tri.v2_.tex_.y;
}
else
{
++num_dropped_tris;
}
}
else
{
++num_invalid_tris;
}
}
}
std::cout << "Updating texture coordinates in inner LOD nodes..." << std::endl;
#ifdef PARALLEL_EXECUTION
#pragma omp parallel for
#endif
for(uint32_t node_id = 0; node_id < first_leaf_id; ++node_id)
{
auto& tris = state.bvh->get_triangles(node_id);
for(int local_tri_id = 0; local_tri_id < tris.size(); ++local_tri_id)
{
auto& tri = tris[local_tri_id];
if(tri.chart_id != -1)
{
auto& proj_info = state.chart_map[tri.area_id][tri.chart_id].projection;
rectangle& chart_rect = state.chart_map[tri.area_id][tri.chart_id].rect_;
rectangle& area_rect = state.area_rects[tri.area_id];
// at this point we will need to project all triangles of inner nodes to their respective charts using the corresponding chart plane
scm::math::vec3f original_v;
for(uint32_t i = 0; i < 3; ++i)
{
switch(i)
{
case 0:
original_v = tri.v0_.pos_;
break;
case 1:
original_v = tri.v1_.pos_;
break;
case 2:
original_v = tri.v2_.pos_;
break;
default:
break;
}
scm::math::vec2f projected_v = project_to_plane(original_v, proj_info.proj_normal, proj_info.proj_centroid, proj_info.proj_world_up);
projected_v -= proj_info.tex_coord_offset; // correct by offset (so that min uv coord = 0)
projected_v /= proj_info.largest_max; // apply normalisation factor
if((chart_rect.flipped_ && !area_rect.flipped_) || (area_rect.flipped_ && !chart_rect.flipped_))
{ // flip if needed
float temp = projected_v.x;
projected_v.x = projected_v.y;
projected_v.y = temp;
}
projected_v *= packing_scale; // scale
projected_v += chart_rect.min_; // offset position in texture
projected_v /= state.image_rect.max_; // scale down to normalised image space
projected_v.y = 1.0 - projected_v.y; // flip y coord
// replace existing coords
switch(i)
{
case 0:
tri.v0_.tex_ = projected_v;
break;
case 1:
tri.v1_.tex_ = projected_v;
break;
case 2:
tri.v2_.tex_ = projected_v;
break;
default:
break;
}
}
}
else
{
++num_invalid_tris;
}
}
}
std::cout << "Num tris with invalid chart ids encountered: " << num_invalid_tris << std::endl;
std::cout << "Num dropped tris encountered: " << num_dropped_tris << std::endl;
}
static void write_bvh(app_state& state, std::string& obj_filename)
{
std::string bvh_filename = obj_filename.substr(0, obj_filename.size() - 4) + ".bvh";
state.bvh->write_bvh_file(bvh_filename);
std::cout << "Bvh file written to " << bvh_filename << std::endl;
std::string lod_filename = obj_filename.substr(0, obj_filename.size() - 4) + ".lod";
state.bvh->write_lod_file(lod_filename);
std::cout << "Lod file written to " << lod_filename << std::endl;
// cleanup
std::cout << "Cleanup bvh" << std::endl;
state.bvh.reset();
}
static void create_viewports(app_state& state, cmd_options& opt)
{
std::cout << "Single texture size limit: " << opt.single_tex_limit << std::endl;
std::cout << "Multi texture size limit: " << opt.multi_tex_limit << std::endl;
{
state.t_d.render_to_texture_width_ = std::max(opt.single_tex_limit, 4096);
state.t_d.render_to_texture_height_ = std::max(opt.single_tex_limit, 4096);
opt.multi_tex_limit = std::max(state.t_d.render_to_texture_width_, (uint32_t)opt.multi_tex_limit); // TODO: why?
state.t_d.full_texture_width_ = state.t_d.render_to_texture_width_;
state.t_d.full_texture_height_ = state.t_d.render_to_texture_height_;
}
// double texture size up to 8k if a given percentage of charts do not have enough pixels
std::cout << "Adjusting final texture size (" << state.t_d.full_texture_width_ << " x " << state.t_d.full_texture_height_ << ")" << std::endl;
{
calculate_new_chart_tex_space_sizes(state.chart_map, state.triangles, scm::math::vec2i(state.t_d.full_texture_width_, state.t_d.full_texture_height_));
while(!is_output_texture_big_enough(state.chart_map, target_percentage_charts_with_enough_pixels))
{
if(std::max(state.t_d.full_texture_width_, state.t_d.full_texture_height_) >= opt.multi_tex_limit)
{
std::cout << "Maximum texture size limit reached (" << state.t_d.full_texture_width_ << " x " << state.t_d.full_texture_height_ << ")" << std::endl;
break;
}
state.t_d.full_texture_width_ *= 2;
state.t_d.full_texture_height_ *= 2;
std::cout << "Not enough pixels! Adjusting final texture size (" << state.t_d.full_texture_width_ << " x " << state.t_d.full_texture_height_ << ")" << std::endl;
calculate_new_chart_tex_space_sizes(state.chart_map, state.triangles, scm::math::vec2i(state.t_d.full_texture_width_, state.t_d.full_texture_height_));
}
}
{
// if the output texture is bigger than the render target size, create a set of viewports that will be rendered separately
if(state.t_d.full_texture_width_ > state.t_d.render_to_texture_width_ || state.t_d.full_texture_height_ > state.t_d.render_to_texture_height_)
{
// calc num of viewports needed from size of output texture
// round up so partially covered viewports are still rendered
int viewports_w = (int)std::ceil((double)state.t_d.full_texture_width_ / (double)state.t_d.render_to_texture_width_);
int viewports_h = (int)std::ceil((double)state.t_d.full_texture_height_ / (double)state.t_d.render_to_texture_height_);
scm::math::vec2f viewport_normed_size(1.f / viewports_w, 1.f / viewports_h);
// create a vector of viewports needed
for(int y = 0; y < viewports_h; ++y)
{
for(int x = 0; x < viewports_w; ++x)
{
viewport vp;
vp.normed_dims = viewport_normed_size;
vp.normed_offset = scm::math::vec2f(viewport_normed_size.x * x, viewport_normed_size.y * y);
state.viewports.push_back(vp);
}
}
}
else
{
viewport single_viewport;
single_viewport.normed_offset = scm::math::vec2f(0.f, 0.f);
single_viewport.normed_dims = scm::math::vec2f(1.0, 1.0);
state.viewports.push_back(single_viewport);
}
std::cout << "Created " << state.viewports.size() << " viewports to render multiple output textures" << std::endl;
}
}
// subroutine for error-checking during shader compilation
static GLint compile_shader(const std::string& _src, GLint _shader_type)
{
const char* shader_src = _src.c_str();
GLuint shader = glCreateShader(_shader_type);
glShaderSource(shader, 1, &shader_src, NULL);
glCompileShader(shader);
GLint status;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
if(status == GL_FALSE)
{
GLint log_length;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);
GLchar* log = new GLchar[log_length + 1];
glGetShaderInfoLog(shader, log_length, NULL, log);
const char* type = NULL;
switch(_shader_type)
{
case GL_VERTEX_SHADER:
type = "vertex";
break;
case GL_FRAGMENT_SHADER:
type = "fragment";
break;
default:
break;
}
std::string error_message = "ERROR: Compile shader failure in " + std::string(type) + " shader:\n" + std::string(log);
delete[] log;
std::cout << error_message << std::endl;
exit(1);
}
return shader;
}
// compile and link the shader programs
static void make_shader_program(GL_handles& handles)
{
// locates vertices at new uv position on screen
// passes old uvs in order to read from the texture
// compile shaders
GLint vertex_shader = compile_shader(vertex_shader_src, GL_VERTEX_SHADER);
GLint fragment_shader = compile_shader(fragment_shader_src, GL_FRAGMENT_SHADER);
// create the GL resource and save the handle for the shader program
handles.shader_program_ = glCreateProgram();
glAttachShader(handles.shader_program_, vertex_shader);
glAttachShader(handles.shader_program_, fragment_shader);
glLinkProgram(handles.shader_program_);
// since the program is already linked, we do not need to keep the separate shader stages
glDetachShader(handles.shader_program_, vertex_shader);
glDeleteShader(vertex_shader);
glDetachShader(handles.shader_program_, fragment_shader);
glDeleteShader(fragment_shader);
}
// compile and link the shader programs
static void make_dilation_shader_program(GL_handles& handles)
{
// compile shaders
GLint vertex_shader = compile_shader(dilation_vertex_shader_src, GL_VERTEX_SHADER);
GLint fragment_shader = compile_shader(dilation_fragment_shader_src, GL_FRAGMENT_SHADER);
// create the GL resource and save the handle for the shader program
handles.dilation_shader_program_ = glCreateProgram();
glAttachShader(handles.dilation_shader_program_, vertex_shader);
glAttachShader(handles.dilation_shader_program_, fragment_shader);
glLinkProgram(handles.dilation_shader_program_);
// since the program is already linked, we do not need to keep the separate shader stages
glDetachShader(handles.dilation_shader_program_, vertex_shader);
glDeleteShader(vertex_shader);
glDetachShader(handles.dilation_shader_program_, fragment_shader);
glDeleteShader(fragment_shader);
}
static void load_textures(app_state& state)
{
state.area_images.resize(state.viewports.size());
std::cout << "Loading all textures..." << std::endl;
for(auto tex_it : state.texture_info_map)
{
state.textures.push_back(load_image(tex_it.second.filename_));
}
std::cout << "Compiling shaders..." << std::endl;
make_shader_program(state.handles);
make_dilation_shader_program(state.handles);
std::cout << "Creating framebuffers..." << std::endl;
// create output frame buffers
for(int i = 0; i < 2; ++i)
{
state.frame_buffers.push_back(std::make_shared<frame_buffer_t>(1, state.t_d.render_to_texture_width_, state.t_d.render_to_texture_height_, GL_RGBA, GL_LINEAR));
}
// create vertex buffer for dilation
float screen_space_quad_geometry[24]{-1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 0.0, -1.0, 1.0, 0.0, 1.0,
1.0, -1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0}; // 6 vertices x (2 position + 2 uv floats)
glGenBuffers(1, &state.handles.dilation_vertex_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, state.handles.dilation_vertex_buffer_);
glBufferData(GL_ARRAY_BUFFER, 6 * 4 * sizeof(float), &screen_space_quad_geometry[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// set the viewport size
glViewport(0, 0, (GLsizei)state.t_d.render_to_texture_width_, (GLsizei)state.t_d.render_to_texture_height_);
// set background colour
glClearColor(1.0f, 0.0f, 1.0f, 1.0f);
glGenBuffers(1, &state.handles.vertex_buffer_);
for(uint32_t view_id = 0; view_id < state.viewports.size(); ++view_id)
{
std::cout << "Rendering into viewport " << view_id << "..." << std::endl;
viewport vport = state.viewports[view_id];
std::cout << "Viewport start: " << vport.normed_offset.x << ", " << vport.normed_offset.y << std::endl;
std::cout << "Viewport size: " << vport.normed_dims.x << ", " << vport.normed_dims.y << std::endl;
state.frame_buffers[0]->enable();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for(uint32_t i = 0; i < state.to_upload_per_texture.size(); ++i)
{
uint32_t num_vertices = state.to_upload_per_texture[i].size();
if(num_vertices == 0)
{
std::cout << "Nothing to render for texture " << i << " (" << state.texture_info_map[i].filename_ << ")" << std::endl;
continue;
}
std::cout << "Rendering from texture " << i << " (" << state.texture_info_map[i].filename_ << ")" << std::endl;
glUseProgram(state.handles.shader_program_);
// upload this vector to GPU
glBindBuffer(GL_ARRAY_BUFFER, state.handles.vertex_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_vertices * sizeof(blit_vertex_t), &state.to_upload_per_texture[i][0], GL_STREAM_DRAW);
// define the layout of the vertex buffer:
// setup 2 attributes per vertex (2x texture coord)
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(blit_vertex_t), (void*)0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(blit_vertex_t), (void*)(2 * sizeof(float)));
// get texture location
int slot = 0;
glUniform1i(glGetUniformLocation(state.handles.shader_program_, "image"), slot);
glUniform2f(glGetUniformLocation(state.handles.shader_program_, "viewport_offset"), vport.normed_offset[0], vport.normed_offset[1]);
glUniform2f(glGetUniformLocation(state.handles.shader_program_, "viewport_scale"), vport.normed_dims[0], vport.normed_dims[1]);
glActiveTexture(GL_TEXTURE0 + slot);
// here, enable the current texture
state.textures[i]->enable(slot);
// draw triangles from the currently bound buffer
glDrawArrays(GL_TRIANGLES, 0, num_vertices);
// unbind, unuse
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
state.textures[i]->disable();
} // end for each texture
state.frame_buffers[0]->disable();
uint32_t current_framebuffer = 0;
std::cout << "Dilating view " << view_id << "..." << std::endl;
uint32_t num_dilations = state.t_d.render_to_texture_width_ / 2;
for(int i = 0; i < num_dilations; ++i)
{
current_framebuffer = (i + 1) % 2;
state.frame_buffers[current_framebuffer]->enable();
int current_texture = 0;
if(current_framebuffer == 0)
{
current_texture = 1;
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(state.handles.dilation_shader_program_);
glBindBuffer(GL_ARRAY_BUFFER, state.handles.dilation_vertex_buffer_);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(blit_vertex_t), (void*)0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(blit_vertex_t), (void*)(2 * sizeof(float)));
int slot = 0;
glUniform1i(glGetUniformLocation(state.handles.dilation_shader_program_, "image"), slot);
glUniform1i(glGetUniformLocation(state.handles.dilation_shader_program_, "image_width"), state.t_d.render_to_texture_width_);
glUniform1i(glGetUniformLocation(state.handles.dilation_shader_program_, "image_height"), state.t_d.render_to_texture_height_);
glActiveTexture(GL_TEXTURE0 + slot);
state.frame_buffers[current_texture]->bind_texture(slot);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
state.frame_buffers[current_texture]->unbind_texture(slot);
state.frame_buffers[current_framebuffer]->disable();
}
std::vector<uint8_t> pixels;
state.frame_buffers[current_framebuffer]->get_pixels(0, pixels);
state.area_images[view_id] = pixels;
} // end for each viewport
}
static void produce_texture(app_state& state, std::string& obj_filename, cmd_options& opt)
{
if(!opt.want_raw_file)
{
// concatenate all area images to one big texture
uint32_t num_bytes_per_pixel = 4;
std::vector<uint8_t> final_texture(state.t_d.full_texture_width_ * state.t_d.full_texture_height_ * num_bytes_per_pixel);
uint32_t num_lookups_per_line = state.t_d.full_texture_width_ / state.t_d.render_to_texture_width_;
for(uint32_t y = 0; y < state.t_d.full_texture_height_; ++y)
{ // for each line
for(uint32_t tex_x = 0; tex_x < num_lookups_per_line; ++tex_x)
{
uint8_t* dst = final_texture.data() + y * state.t_d.full_texture_width_ * num_bytes_per_pixel + tex_x * state.t_d.render_to_texture_width_ * num_bytes_per_pixel;
uint32_t tex_y = y / state.t_d.render_to_texture_height_;
uint32_t tex_id = tex_y * num_lookups_per_line + tex_x;
const uint8_t* src = state.area_images[tex_id].data() + (y % state.t_d.render_to_texture_height_) * state.t_d.render_to_texture_width_ * num_bytes_per_pixel;
memcpy(dst, src, state.t_d.render_to_texture_width_ * num_bytes_per_pixel);
}
}
std::string image_filename = obj_filename.substr(0, obj_filename.size() - 4) + "_texture.png";
utils::save_image(image_filename, final_texture, state.t_d.full_texture_width_, state.t_d.full_texture_height_);
}
else
{
std::ofstream raw_file;
std::string image_filename =
obj_filename.substr(0, obj_filename.size() - 4) + "_rgba_w" + std::to_string(state.t_d.full_texture_width_) + "_h" + std::to_string(state.t_d.full_texture_height_) + ".data";
raw_file.open(image_filename, std::ios::out | std::ios::trunc | std::ios::binary);
// concatenate all area images to one big texture
uint32_t num_bytes_per_pixel = 4;
uint32_t num_lookups_per_line = state.t_d.full_texture_width_ / state.t_d.render_to_texture_width_;
for(uint32_t y = 0; y < state.t_d.full_texture_height_; ++y)
{ // for each line
for(uint32_t tex_x = 0; tex_x < num_lookups_per_line; ++tex_x)
{
uint32_t tex_y = y / state.t_d.render_to_texture_height_;
uint32_t tex_id = tex_y * num_lookups_per_line + tex_x;
char* src = ((char*)&state.area_images[tex_id][0]) + (y % state.t_d.render_to_texture_height_) * state.t_d.render_to_texture_width_ * num_bytes_per_pixel; //+ 0;
raw_file.write(src, state.t_d.render_to_texture_width_ * num_bytes_per_pixel);
}
}
raw_file.close();
}
}
} // namespace utils
#endif
|
GPUCommonMath.h
|
//**************************************************************************\
//* This file is property of and copyright by the ALICE Project *\
//* ALICE Experiment at CERN, All rights reserved. *\
//* *\
//* Primary Authors: Matthias Richter <[email protected]> *\
//* for The ALICE HLT Project. *\
//* *\
//* Permission to use, copy, modify and distribute this software and its *\
//* documentation strictly for non-commercial purposes is hereby granted *\
//* without fee, provided that the above copyright notice appears in all *\
//* copies and that both the copyright notice and this permission notice *\
//* appear in the supporting documentation. The authors make no claims *\
//* about the suitability of this software for any purpose. It is *\
//* provided "as is" without express or implied warranty. *\
//**************************************************************************
/// \file GPUCommonMath.h
/// \author David Rohr, Sergey Gorbunov
#ifndef GPUCOMMONMATH_H
#define GPUCOMMONMATH_H
#include "GPUCommonDef.h"
#if !defined(__OPENCL__)
#include <cmath>
#include <algorithm>
#endif
#if !defined(__OPENCL__) || defined(__OPENCLCPP__)
namespace GPUCA_NAMESPACE
{
namespace gpu
{
#endif
class GPUCommonMath
{
public:
GPUhdni() static float2 MakeFloat2(float x, float y);
template <class T>
GPUhd() static T Min(T x, T y);
template <class T>
GPUhd() static T Max(T x, T y);
GPUhdni() static float Sqrt(float x);
template <class T>
GPUhd() static T Abs(T x);
GPUhdni() static float ASin(float x);
GPUhdni() static float ATan(float x);
GPUhdni() static float ATan2(float y, float x);
GPUhdni() static float Sin(float x);
GPUhdni() static float Cos(float x);
GPUhdni() static float Tan(float x);
GPUhdni() static float Copysign(float x, float y);
GPUhdni() static float TwoPi() { return 6.28319f; }
GPUhdni() static float Pi() { return 3.1415926535897f; }
GPUhdni() static int Nint(float x);
GPUhdni() static bool Finite(float x);
GPUhdni() static unsigned int Clz(unsigned int val);
GPUhdni() static float Log(float x);
GPUd() static unsigned int AtomicExch(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static unsigned int AtomicAdd(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static void AtomicMax(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static void AtomicMin(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static unsigned int AtomicExchShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static unsigned int AtomicAddShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static void AtomicMaxShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static void AtomicMinShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val);
GPUd() static int Mul24(int a, int b);
GPUd() static float FMulRZ(float a, float b);
};
typedef GPUCommonMath CAMath;
#if defined(GPUCA_GPUCODE_DEVICE) && (defined(__CUDACC__) || defined(__HIPCC__)) // clang-format off
#define CHOICE(c1, c2, c3) c2 // Select second option for CUDA and HIP
#elif defined(GPUCA_GPUCODE_DEVICE) && defined (__OPENCL__)
#define CHOICE(c1, c2, c3) c3 // Select third option for OpenCL
#else
#define CHOICE(c1, c2, c3) c1 //Select first option for Host
#endif // clang-format on
GPUhdi() float2 GPUCommonMath::MakeFloat2(float x, float y)
{
#if !defined(GPUCA_GPUCODE) || defined(__OPENCL__)
float2 ret = {x, y};
return ret;
#else
return make_float2(x, y);
#endif // GPUCA_GPUCODE
}
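// rounds to the nearest integer; exact halfway cases (x == i + 0.5) are rounded to the
// nearest even integer (banker's rounding), which is what the i & 1 adjustment below does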
GPUhdi() int GPUCommonMath::Nint(float x)
{
int i;
if (x >= 0) {
i = int(x + 0.5f);
if (x + 0.5f == float(i) && i & 1)
i--;
} else {
i = int(x - 0.5f);
if (x - 0.5f == float(i) && i & 1)
i++;
}
return i;
}
GPUhdi() bool GPUCommonMath::Finite(float x) { return CHOICE(std::isfinite(x), true, true); }
GPUhdi() float GPUCommonMath::ATan(float x) { return CHOICE(atanf(x), atanf(x), atan(x)); }
GPUhdi() float GPUCommonMath::ATan2(float y, float x) { return CHOICE(atan2f(y, x), atan2f(y, x), atan2(y, x)); }
GPUhdi() float GPUCommonMath::Sin(float x) { return CHOICE(sinf(x), sinf(x), sin(x)); }
GPUhdi() float GPUCommonMath::Cos(float x) { return CHOICE(cosf(x), cosf(x), cos(x)); }
GPUhdi() float GPUCommonMath::Tan(float x) { return CHOICE(tanf(x), tanf(x), tan(x)); }
GPUhdi() unsigned int GPUCommonMath::Clz(unsigned int x)
{
#if (defined(__GNUC__) || defined(__clang__) || defined(__CUDACC__) || defined(__HIPCC__)) && (!defined(__OPENCL__) || defined(__OPENCLCPP__))
return CHOICE(__builtin_clz(x), __clz(x), __builtin_clz(x)); // use builtin if available
#else
for (int i = 31; i >= 0; i--) {
if (x & (1 << i))
return (31 - i);
}
return 32;
#endif
}
template <class T>
GPUhdi() T GPUCommonMath::Min(T x, T y)
{
return CHOICE(std::min(x, y), std::min(x, y), (x < y ? x : y));
}
template <class T>
GPUhdi() T GPUCommonMath::Max(T x, T y)
{
return CHOICE(std::max(x, y), std::max(x, y), (x > y ? x : y));
}
GPUhdi() float GPUCommonMath::Sqrt(float x) { return CHOICE(sqrtf(x), sqrtf(x), sqrt(x)); }
template <>
GPUhdi() float GPUCommonMath::Abs<float>(float x)
{
return CHOICE(fabsf(x), fabsf(x), fabs(x));
}
#if !defined(__OPENCL__) || defined(cl_khr_fp64)
template <>
GPUhdi() double GPUCommonMath::Abs<double>(double x)
{
return CHOICE(fabs(x), fabs(x), fabs(x));
}
#endif
template <>
GPUhdi() int GPUCommonMath::Abs<int>(int x)
{
return CHOICE(abs(x), abs(x), abs(x));
}
GPUhdi() float GPUCommonMath::ASin(float x) { return CHOICE(asinf(x), asinf(x), asin(x)); }
GPUhdi() float GPUCommonMath::Log(float x) { return CHOICE(logf(x), logf(x), log(x)); }
GPUhdi() float GPUCommonMath::Copysign(float x, float y)
{
#if defined(__OPENCLCPP__)
return copysign(x, y);
#elif defined(GPUCA_GPUCODE) && !defined(__OPENCL__)
return copysignf(x, y);
#elif defined(__cplusplus) && __cplusplus >= 201103L
return std::copysignf(x, y);
#else
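// Portable fallback: the sign is taken from (y >= 0), so this can differ from
// copysignf for y == -0.0f (and for NaN values of y).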
x = GPUCommonMath::Abs(x);
return (y >= 0) ? x : -x;
#endif // GPUCA_GPUCODE
}
#if defined(__OPENCL__) && (!defined(__OPENCLCPP__) || (defined(__clang__) && !defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS)))
GPUdi() unsigned int GPUCommonMath::AtomicExchShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val)
{
return ::atomic_xchg(addr, val);
}
GPUdi() unsigned int GPUCommonMath::AtomicAddShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { return ::atomic_add(addr, val); }
GPUdi() void GPUCommonMath::AtomicMaxShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { ::atomic_max(addr, val); }
GPUdi() void GPUCommonMath::AtomicMinShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { ::atomic_min(addr, val); }
#else
GPUdi() unsigned int GPUCommonMath::AtomicExchShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val)
{
return GPUCommonMath::AtomicExch(addr, val);
}
GPUdi() unsigned int GPUCommonMath::AtomicAddShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { return GPUCommonMath::AtomicAdd(addr, val); }
GPUdi() void GPUCommonMath::AtomicMaxShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { GPUCommonMath::AtomicMax(addr, val); }
GPUdi() void GPUCommonMath::AtomicMinShared(GPUsharedref() GPUAtomic(unsigned int) * addr, unsigned int val) { GPUCommonMath::AtomicMin(addr, val); }
#endif
#ifndef GPUCA_GPUCODE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-value" // GCC BUG in omp atomic capture gives false warning
#endif
GPUdi() unsigned int GPUCommonMath::AtomicExch(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
return ::atomic_exchange(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
return ::atomic_xchg(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
return ::atomicExch(addr, val);
#else
unsigned int old;
#ifdef GPUCA_HAVE_OPENMP
#pragma omp atomic capture
#endif
{
old = *addr;
*addr = val;
}
return old;
#endif // GPUCA_GPUCODE
}
GPUdi() unsigned int GPUCommonMath::AtomicAdd(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
return ::atomic_fetch_add(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
return ::atomic_add(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
return ::atomicAdd(addr, val);
#else
unsigned int old;
#ifdef GPUCA_HAVE_OPENMP
#pragma omp atomic capture
#endif
{
old = *addr;
*addr += val;
}
return old;
#endif // GPUCA_GPUCODE
}
GPUdi() void GPUCommonMath::AtomicMax(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
::atomic_fetch_max(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
::atomic_max(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
::atomicMax(addr, val);
#else
#ifdef GPUCA_HAVE_OPENMP
while (*addr < val)
AtomicExch(addr, val);
#else
if (*addr < val)
*addr = val;
#endif
#endif // GPUCA_GPUCODE
}
GPUdi() void GPUCommonMath::AtomicMin(GPUglobalref() GPUAtomic(unsigned int) * addr, unsigned int val)
{
#if defined(GPUCA_GPUCODE) && defined(__OPENCLCPP__) && (!defined(__clang__) || defined(GPUCA_OPENCL_CPP_CLANG_C11_ATOMICS))
::atomic_fetch_min(addr, val);
#elif defined(GPUCA_GPUCODE) && defined(__OPENCL__)
::atomic_min(addr, val);
#elif defined(GPUCA_GPUCODE) && (defined(__CUDACC__) || defined(__HIPCC__))
::atomicMin(addr, val);
#else
#ifdef GPUCA_HAVE_OPENMP
while (*addr > val)
AtomicExch(addr, val);
#else
if (*addr > val)
*addr = val;
#endif
#endif // GPUCA_GPUCODE
}
#ifndef GPUCA_GPUCODE
#pragma GCC diagnostic pop
#endif
#undef CHOICE
#if !defined(__OPENCL__) || defined(__OPENCLCPP__)
}
}
#endif
#endif // GPUCOMMONMATH_H
|
host_targ.c
|
#include <stdio.h>
#include <omp.h>
int arr[100];
int nt = 12;
int iid, gid, gdd, gdn;
int main()
{
fprintf(stderr, "Omp host get_num_devices %d\n", omp_get_num_devices());
#pragma omp target map(tofrom: iid, gid, gdd, gdn)
{
iid = omp_is_initial_device();
gid = omp_get_initial_device();
gdd = omp_get_default_device();
gdn = omp_get_device_num();
}
fprintf(stderr, "Omp target omp_is_initial_device %d\n", iid);
fprintf(stderr, "Omp target omp_get_initial_device %d\n", gid);
fprintf(stderr, "Omp target omp_get_default_device %d\n", gdd);
fprintf(stderr, "Omp target omp_get_device_num %d\n", gdn);
#pragma omp target teams distribute parallel for num_threads(nt)
for (int i = 0; i < 100; i++)
arr[i] = i;
// Verify
int errors = 0;
for (int i = 0; i < 100; i++) {
if (arr[i] != i)
errors++;
}
if (!errors) {
fprintf(stderr, "Success\n");
return 0;
} else {
fprintf(stderr, "Failed\nErrors: %d\n", errors);
return 1;
}
}
|
kthvalue_op.h
|
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"
namespace paddle {
namespace operators {
template <typename T, typename Type>
static void getKthvalue(Type input_height, Type input_width, int input_dim,
const framework::Tensor* input, T* t_out,
Type* t_indices, const int& k) {
bool partial_sort_flag = (k * 64) < input_width;
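// Heuristic: use std::partial_sort only when k is much smaller than the row width
// (k * 64 < input_width); otherwise std::nth_element is used below.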
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (Type i = 0; i < input_height; ++i) {
std::vector<std::pair<T, Type>> col_vec;
col_vec.reserve(input_width);
if (input_dim == 1) {
auto e_input = framework::EigenVector<T>::Flatten(*input);
for (Type j = 0; j < input_width; ++j) {
col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
}
} else {
auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
for (Type j = 0; j < input_width; ++j) {
col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
}
}
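// The comparators below order NaN values after all non-NaN values, giving an
// ascending order with NaNs last.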
if (partial_sort_flag) {
std::partial_sort(
col_vec.begin(), col_vec.begin() + k, col_vec.end(),
[](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
return (!std::isnan(static_cast<double>(l.first)) &&
std::isnan(static_cast<double>(r.first))) ||
(l.first < r.first);
});
} else {
std::nth_element(
col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
[](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
return (!std::isnan(static_cast<double>(l.first)) &&
std::isnan(static_cast<double>(r.first))) ||
(l.first < r.first);
});
}
t_out[i] = col_vec[k - 1].first;
t_indices[i] = col_vec[k - 1].second;
}
}
template <typename T, typename Type>
static void kthvalueAssign(const Type& input_height, const Type& input_width,
const int& input_dim, const framework::Tensor* input,
const framework::Tensor* indices, T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (Type i = 0; i < input_height; ++i) {
if (input_dim == 1) {
auto e_input = framework::EigenVector<T>::Flatten(*input);
auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
output_data[i * input_width + e_indices(0)] = e_input(0);
} else {
auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
auto e_indices =
framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
}
}
}
template <typename DeviceContext, typename T>
class KthvalueCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<framework::Tensor>("X");
auto* output = context.Output<framework::Tensor>("Out");
auto* indices = context.Output<framework::Tensor>("Indices");
const auto& in_dims = input->dims();
int k = static_cast<int>(context.Attr<int>("k"));
bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
int axis = static_cast<int>(context.Attr<int>("axis"));
if (axis < 0) axis += in_dims.size();
T* output_data = output->mutable_data<T>(context.GetPlace());
int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
auto out_dims = output->dims();
if (axis == in_dims.size() - 1) {
const int64_t& input_height =
pten::product(pten::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
getKthvalue<T, int64_t>(input_height, input_width, in_dims.size(), input,
output_data, indices_data, k);
} else {
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
framework::DDim tmp_out_dims = pten::make_ddim(tmp_out_shape);
output->Resize(tmp_out_dims);
indices->Resize(tmp_out_dims);
}
framework::DDim trans_dims(in_dims);
framework::DDim trans_out_dims(in_dims);
for (size_t i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = in_dims[trans[i]];
}
trans_out_dims[in_dims.size() - 1] = 1;
framework::Tensor trans_inp;
trans_inp.mutable_data<T>(trans_dims, context.GetPlace());
int ndims = trans.size();
auto& dev_context =
context.template device_context<platform::CPUDeviceContext>();
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
&trans_inp, trans);
const int64_t input_height =
pten::product(pten::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
framework::Tensor tmp_out, tmp_indices;
T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace());
auto* t_ind =
tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace());
getKthvalue<T, int64_t>(input_height, input_width, in_dims.size(),
&trans_inp, t_out, t_ind, k);
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_context, tmp_indices, indices, trans);
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
output, trans);
if (!keepdim) {
output->Resize(out_dims);
indices->Resize(out_dims);
}
}
}
};
template <typename DeviceContext, typename T>
class KthvalueGradCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<framework::Tensor>("X");
auto* out_grad =
context.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* indices = context.Input<framework::Tensor>("Indices");
auto* x_grad =
context.Output<framework::Tensor>(framework::GradVarName("X"));
int axis = static_cast<int>(context.Attr<int>("axis"));
bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
auto in_dims = x->dims();
auto out_dims = indices->dims();
axis = (axis < 0) ? (in_dims.size() + axis) : axis;
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(out_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(out_dims[i - 1]);
}
out_dims = pten::make_ddim(tmp_out_shape);
}
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
if (axis == in_dims.size() - 1) {
const int64_t input_height =
pten::product(pten::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t input_width = in_dims[in_dims.size() - 1];
memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
if (keepdim) {
kthvalueAssign(input_height, input_width, in_dims.size(), out_grad,
indices, x_grad_data);
} else {
auto& dev_context =
context.template device_context<platform::CPUDeviceContext>();
framework::Tensor out_grad_tmp, indices_tmp;
out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
indices_tmp.mutable_data<int64_t>(indices->dims(),
dev_context.GetPlace());
framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
&out_grad_tmp);
framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
&indices_tmp);
out_grad_tmp.Resize(out_dims);
indices_tmp.Resize(out_dims);
kthvalueAssign(input_height, input_width, in_dims.size(), &out_grad_tmp,
&indices_tmp, x_grad_data);
}
} else {
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(out_dims.size() - 1);
for (int i = axis + 1; i < out_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
framework::DDim trans_dims(out_dims);
framework::DDim trans_in_dims(in_dims);
for (size_t i = 0; i < trans.size(); i++) {
trans_dims[i] = out_dims[trans[i]];
trans_in_dims[i] = in_dims[trans[i]];
}
framework::Tensor trans_dO, trans_ind;
trans_dO.mutable_data<T>(trans_dims, context.GetPlace());
trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace());
int ndims = trans.size();
auto& dev_context =
context.template device_context<platform::CPUDeviceContext>();
if (keepdim) {
TransCompute<platform::CPUDeviceContext, T>(
ndims, dev_context, *out_grad, &trans_dO, trans);
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_context, *indices, &trans_ind, trans);
} else {
framework::Tensor out_grad_tmp, indices_tmp;
out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
indices_tmp.mutable_data<int64_t>(indices->dims(),
dev_context.GetPlace());
framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
&out_grad_tmp);
framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
&indices_tmp);
out_grad_tmp.Resize(out_dims);
indices_tmp.Resize(out_dims);
TransCompute<platform::CPUDeviceContext, T>(
ndims, dev_context, out_grad_tmp, &trans_dO, trans);
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_context, indices_tmp, &trans_ind, trans);
}
const int64_t input_height = pten::product(
pten::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1));
const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1];
framework::Tensor tmp_out;
T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace());
memset(t_out, 0, x_grad->numel() * sizeof(T));
kthvalueAssign<T, int64_t>(input_height, input_width, in_dims.size(),
&trans_dO, &trans_ind, t_out);
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
x_grad, trans);
}
}
};
} // namespace operators
} // namespace paddle
|
GB_unaryop__minv_fp32_uint64.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint64
// op(A') function: GB_tran__minv_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp32_uint64
(
float *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_fp32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
THTensorMath.c
|
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#define TH_OMP_OVERHEAD_THRESHOLD 100000
void THTensor_(fill)(THTensor *r_, real value)
{
TH_TENSOR_APPLY(real, r_,
THVector_(fill)(r__data, value, r__size); break;);
}
void THTensor_(zero)(THTensor *r_)
{
TH_TENSOR_APPLY(real, r_,
THVector_(fill)(r__data, 0, r__size); break;);
}
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1) THError("Mask tensor can take 0 and 1 values only");
else if (*mask_data == 1) *tensor_data = value;);
}
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
THTensor *srct = THTensor_(newContiguous)(src);
real *src_data = THTensor_(data)(srct);
long cntr = 0;
long nelem = THTensor_(nElement)(srct);
if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
{
THTensor_(free)(srct);
THError("Number of elements of destination tensor != Number of elements in mask");
}
TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
if (*mask_data > 1)
{
THTensor_(free)(srct);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
if (cntr == nelem) {
THTensor_(free)(srct);
THError("Number of elements of src < number of ones in mask");
}
*tensor_data = *src_data;
src_data++;
cntr++;
});
THTensor_(free)(srct);
}
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
long numel = THByteTensor_sumall(mask);
real *tensor_data;
THTensor_(resize1d)(tensor,numel);
tensor_data = THTensor_(data)(tensor);
TH_TENSOR_APPLY2(real, src, unsigned char, mask,
if (*mask_data > 1)
{
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = *src_data;
tensor_data++;
});
}
// Finds non-zero elements of a tensor and returns their subscripts
void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
{
long numel = 0;
long *subscript_data;
long i = 0;
long dim;
long div = 1;
/* First Pass to determine size of subscripts */
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data != 0) {
++numel;
});
THLongTensor_resize2d(subscript, numel, tensor->nDimension);
/* Second pass populates subscripts */
subscript_data = THLongTensor_data(subscript);
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data != 0) {
div = 1;
for (dim = tensor->nDimension - 1; dim >= 0; dim--) {
*(subscript_data + dim) = (i/div) % tensor->size[dim];
div *= tensor->size[dim];
}
subscript_data += tensor->nDimension;
}
++i;);
}
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
long i, numel;
THLongStorage *newSize;
THTensor *tSlice, *sSlice;
long *index_data;
real *tensor_data, *src_data;
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1);
THArgCheck(src->nDimension > 0,2,"Source tensor is empty");
numel = THLongTensor_nElement(index);
newSize = THLongStorage_newWithSize(src->nDimension);
THLongStorage_rawCopy(newSize,src->size);
newSize->data[dim] = numel;
THTensor_(resize)(tensor,newSize,NULL);
THLongStorage_free(newSize);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
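/* Fast path: when dim == 0 and both src and tensor are contiguous, whole rows are
copied with memcpy; note that index values are 1-based. */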
if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor))
{
tensor_data = THTensor_(data)(tensor);
src_data = THTensor_(data)(src);
long rowsize = THTensor_(nElement)(src) / src->size[0];
// check that the indices are within range
long max = src->size[0];
for (i=0; i<numel; i++) {
if (index_data[i] < 1 || index_data[i] > max) {
THLongTensor_free(index);
THError("index out of range");
}
}
if (src->nDimension == 1) {
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
tensor_data[i] = src_data[index_data[i]-1];
} else {
#pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
memcpy(tensor_data + i*rowsize, src_data + (index_data[i]-1)*rowsize, rowsize*sizeof(real));
}
}
else if (src->nDimension == 1)
{
for (i=0; i<numel; i++)
THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i]-1));
}
else
{
for (i=0; i<numel; i++)
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor, dim, i);
THTensor_(select)(sSlice, src, dim, index_data[i]-1);
THTensor_(copy)(tSlice, sSlice);
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
}
THLongTensor_free(index);
}
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long i, numel;
THTensor *tSlice, *sSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1);
THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1 )
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i]-1);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(copy)(tSlice, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor,index_data[i]-1,THTensor_(get1d)(src,i));
}
}
THLongTensor_free(index);
}
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
long i, numel;
THTensor *tSlice;
long *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
for (i=0; i<numel; i++)
{
if (tensor->nDimension > 1 )
{
tSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor,dim,index_data[i]-1);
THTensor_(fill)(tSlice, val);
THTensor_(free)(tSlice);
}
else
{
THTensor_(set1d)(tensor,index_data[i]-1,val);
}
}
THLongTensor_free(index);
}
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
long elems_per_row, i, idx;
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
"Input tensor must have same dimensions as output tensor");
THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
"Index tensor must have same dimensions as input tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < 1 || idx > src_size) THError("Invalid index in gather");
*(tensor_data + i*tensor_stride) = src_data[(idx - 1) * src_stride];
})
}
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < 1 || idx > tensor_size) THError("Invalid index in scatter");
tensor_data[(idx - 1) * tensor_stride] = *(src_data + i*src_stride);
})
}
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
long elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < 1 || idx > tensor_size) THError("Invalid index in scatter");
tensor_data[(idx - 1) * tensor_stride] = val;
})
}
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
accreal sum = 0;
/* Trick: each contiguous run exposed by TH_TENSOR_APPLY2 is handed to BLAS dot in one
call; the counters and data pointers are advanced manually and the break lets the macro
move on to the next run. Be careful when modifying this. */
TH_TENSOR_APPLY2(real, tensor, real, src,
long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
tensor_i += sz;
src_i += sz;
tensor_data += sz*tensor_stride;
src_data += sz*src_stride;
break;);
return sum;
}
real THTensor_(minall)(THTensor *tensor)
{
real theMin;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMin = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theMin = value;
if (isnan(value))
break;
});
return theMin;
}
real THTensor_(maxall)(THTensor *tensor)
{
real theMax;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMax = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theMax = value;
if (isnan(value))
break;
});
return theMax;
}
accreal THTensor_(sumall)(THTensor *tensor)
{
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
return sum;
}
accreal THTensor_(prodall)(THTensor *tensor)
{
accreal prod = 1;
TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
return prod;
}
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] + value;
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
}
}
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] * value;
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
}
}
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] / value;
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
}
}
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
real t_val;
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
}
}
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
if(r_ == t) {
THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
} else {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i< sz; i++)
rp[i] = tp[i] + value * sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
}
}
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] * sp[i];
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
}
}
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = pow(tp[i], sp[i]);
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data););
}
}
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = tp[i] / sp[i];
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
}
}
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = pow(value, tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data););
}
}
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected, got %dD, %dD",
mat->nDimension, vec->nDimension);
if( mat->size[1] != vec->size[0] ) {
THDescBuff bm = THTensor_(sizeDesc)(mat);
THDescBuff bv = THTensor_(sizeDesc)(vec);
THError("size mismatch, %s, %s", bm.str, bv.str);
}
if(t->nDimension != 1)
THError("vector expected, got t: %dD", t->nDimension);
if(t->size[0] != mat->size[0]) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm = THTensor_(sizeDesc)(mat);
THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(mat->stride[0] == 1)
{
THBlas_(gemv)('n', mat->size[0], mat->size[1],
alpha, THTensor_(data)(mat), mat->stride[1],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else if(mat->stride[1] == 1)
{
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(mat), mat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cmat = THTensor_(newContiguous)(mat);
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(cmat), cmat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
THTensor_(free)(cmat);
}
}
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
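/* Computes r_[i][j] = gain * squared Euclidean distance between row i of m1 and
row j of m2 (both viewed as N x dim matrices). */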
long N1 = m1->size[0];
long N2 = m2->size[0];
long dim;
real *m1_p;
real *m2_p;
real *r_p;
long i;
THTensor_(resize2d)(r_, N1, N2);
m1 = THTensor_(newContiguous)(m1);
m2 = THTensor_(newContiguous)(m2);
THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);
dim = m1->size[1];
THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");
m1_p = THTensor_(data)(m1);
m2_p = THTensor_(data)(m2);
r_p = THTensor_(data)(r_);
#pragma omp parallel for private(i)
for (i=0; i<N1; i++) {
long j,k;
for (j=0; j<N2; j++) {
real sum = 0;
for (k=0; k<dim; k++) {
real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
sum += term*term;
}
r_p[ i*N2 + j ] = gain * sum;
}
}
THTensor_(free)(m1);
THTensor_(free)(m2);
}
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2))
THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);
if(m1->size[1] != m2->size[0]) {
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
}
if( t->nDimension != 2 )
THError("matrix expected, got %dD tensor for t", t->nDimension);
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
}
if(t != r_)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
/* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */
/* r_ */
if(r_->stride[0] == 1 &&
r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
r_->stride[0] != 0)
{
THTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
r__ = THTensor_(newWithSize2d)(r_->size[1], r_->size[0]);
THTensor_(copy)(r__, r_);
THTensor_(transpose)(r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THTensor_(newContiguous)(m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THTensor_(newContiguous)(m2);
}
/* do the operation */
THBlas_(gemm)(transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THTensor_(data)(m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THTensor_(data)(m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THTensor_(data)(r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THTensor_(free)(m1_);
if(m2_ != m2)
THTensor_(free)(m2_);
if(r__ != r_)
THTensor_(freeCopyTo)(r__, r_);
}
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected, got %dD, %dD tensors",
vec1->nDimension, vec2->nDimension);
if(t->nDimension != 2)
THError("expected matrix, got %dD tensor for t", t->nDimension);
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(beta != 1)
THTensor_(mul)(r_, r_, beta);
if(r_->stride[0] == 1)
{
THBlas_(ger)(vec1->size[0], vec2->size[0],
alpha, THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cr = THTensor_(newClone)(r_);
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(cr), cr->stride[0]);
THTensor_(freeCopyTo)(cr, r_);
}
}
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
long batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));
long dim1 = THTensor_(size)(batch1, 1);
long dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
THTensor_(copy)(result, t);
}
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
beta = 1; // after the first batch, accumulate subsequent products into the result
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
}
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
long batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));
long bs = THTensor_(size)(batch1, 0);
long dim1 = THTensor_(size)(batch1, 1);
long dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
THTensor_(copy)(result, t);
}
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
THTensor *result_matrix = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(select)(result_matrix, result, 0, batch);
THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2);
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
THTensor_(free)(result_matrix);
}
long THTensor_(numel)(THTensor *t)
{
return THTensor_(nElement)(t);
}
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
real theMax;
real value;
long theIndex;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
theMax = t_data[0];
theIndex = 0;
for(i = 0; i < t_size; i++)
{
value = t_data[i*t_stride];
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theIndex = i;
theMax = value;
if (isnan(value))
break;
}
}
*indices__data = theIndex;
*values__data = theMax;);
}
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
real theMin;
real value;
long theIndex;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
theMin = t_data[0];
theIndex = 0;
for(i = 0; i < t_size; i++)
{
value = t_data[i*t_stride];
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theIndex = i;
theMin = value;
if (isnan(value))
break;
}
}
*indices__data = theIndex;
*values__data = theMin;);
}
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum;);
}
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal prod = 1;
long i;
for(i = 0; i < t_size; i++)
prod *= t_data[i*t_stride];
*r__data = (real)prod;);
}
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension+1);
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumsum = 0;
long i;
for(i = 0; i < t_size; i++)
{
cumsum += t_data[i*t_stride];
r__data[i*r__stride] = (real)cumsum;
});
}
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension+1);
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumprod = 1;
long i;
for(i = 0; i < t_size; i++)
{
cumprod *= t_data[i*t_stride];
r__data[i*r__stride] = (real)cumprod;
});
}
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
#if defined (TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else *r__data = 0;);
#else
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else if (*t_data < 0) *r__data = -1;
else *r__data = 0;);
#endif
}
accreal THTensor_(trace)(THTensor *t)
{
real *t_data = THTensor_(data)(t);
accreal sum = 0;
long i = 0;
long t_stride_0, t_stride_1, t_diag_size;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
while(i < t_diag_size)
{
sum += t_data[i*(t_stride_0+t_stride_1)];
i++;
}
return sum;
}
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
int i;
if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
THError("inconsistent tensor dimension %dD, %dD",
THTensor_(nDimension)(a), THTensor_(nDimension)(b));
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
THDescBuff ba = THTensor_(sizeDesc)(a);
THDescBuff bb = THTensor_(sizeDesc)(b);
THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
}
}
if(dimension < 0)
{
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) == 3)
{
dimension = i;
break;
}
}
if(dimension < 0) {
THDescBuff ba = THTensor_(sizeDesc)(a);
THError("no dimension of size 3 in a: %s", ba.str);
}
}
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
dimension+1);
THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
dimension+1);
THTensor_(resizeAs)(r_, a);
TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY3(real, r, real, t, real, src,
*r_data = *t_data > *src_data ? *t_data : *src_data;);
}
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY3(real, r, real, t, real, src,
*r_data = *t_data < *src_data ? *t_data : *src_data;);
}
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY2(real, r, real, t,
*r_data = *t_data > value ? *t_data : value;);
}
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
THTensor_(resizeAs)(r, t);
TH_TENSOR_APPLY2(real, r, real, t,
*r_data = *t_data < value ? *t_data : value;);
}
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(zero)(r_);
}
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(fill)(r_, 1);
}
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");
if(THTensor_(nDimension)(t) == 1)
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_size = THTensor_(size)(t, 0);
long sz = t_size + (k >= 0 ? k : -k);
real *r__data;
long r__stride_0;
long r__stride_1;
long i;
THTensor_(resize2d)(r_, sz, sz);
THTensor_(zero)(r_);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);
for(i = 0; i < t_size; i++)
r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
}
else
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_stride_1 = THTensor_(stride)(t, 1);
long sz;
real *r__data;
long r__stride_0;
long i;
if(k >= 0)
sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
else
sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
THTensor_(resize1d)(r_, sz);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
for(i = 0; i < sz; i++)
r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
}
}
void THTensor_(eye)(THTensor *r_, long n, long m)
{
real *r__data;
long i, sz;
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THTensor_(resize2d)(r_, n, m);
THTensor_(zero)(r_);
i = 0;
r__data = THTensor_(data)(r_);
sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
for(i = 0; i < sz; i++)
r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
long size;
real i = 0;
THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound incoherent with step sign");
size = (long)((xmax/step - xmin/step)+1);
THTensor_(resize1d)(r_, size);
TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n)
{
real *r__data;
long r__stride_0;
long i;
THArgCheck(n > 0, 1, "must be strictly positive");
THTensor_(resize1d)(r_, n);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_,0);
for(i = 0; i < n; i++)
r__data[i*r__stride_0] = (real)(i);
for(i = 0; i < n-1; i++)
{
long z = THRandom_random(_generator) % (n-i);
real sav = r__data[i*r__stride_0];
r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
r__data[(z+i)*r__stride_0] = sav;
}
}
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
Sedgewick's 1978 "Implementing Quicksort Programs" article
http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf
   It is a state-of-the-art implementation. The macros
   are here to make as close a match as possible to the pseudocode of
   Program 2, p. 851.
   Note that other partition schemes exist and are typically presented
   in textbooks, but they are less efficient. See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
Julien, November 12th 2013
*/
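/* ARR/IDX address the value and index arrays with the caller's stride, and
BOTH_SWAP keeps each value paired with its original index. */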
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap
#define BOTH_SWAP(III, JJJ) \
REAL_SWAP(ARR(III), ARR(JJJ)); \
LONG_SWAP(IDX(III), IDX(JJJ))
static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
real rswap, piv;
unsigned char done = 0;
/* beg[0]=0; end[0]=elements; */
stack = 0;
L = 0; R = elements-1;
done = elements-1 <= M_SMALL;
while(!done) {
/* Use median of three for pivot choice */
P=(L+R)>>1;
BOTH_SWAP(P, L+1);
if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }
i = L+1; j = R; piv = ARR(L); pid = IDX(L);
do {
do { i = i+1; } while(ARR(i) < piv);
do { j = j-1; } while(ARR(j) > piv);
if (j < i)
break;
BOTH_SWAP(i, j);
} while(1);
BOTH_SWAP(L, j);
/* Left subfile is (L, j-1) */
/* Right subfile is (i, R) */
sz_left = j-L;
sz_right = R-i+1;
if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
/* both subfiles are small */
/* if stack empty */
if (stack == 0) {
done = 1;
} else {
stack--;
L = beg[stack];
R = end[stack];
}
} else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
/* exactly one of the subfiles is small */
/* (L,R) = large subfile */
if (sz_left > sz_right) {
/* Implicit: L = L; */
R = j-1;
} else {
L = i;
/* Implicit: R = R; */
}
} else {
/* none of the subfiles is small */
/* push large subfile */
/* (L,R) = small subfile */
if (sz_left > sz_right) {
beg[stack] = L;
end[stack] = j-1;
stack++;
L = i;
/* Implicit: R = R */
} else {
beg[stack] = i;
end[stack] = R;
stack++;
/* Implicit: L = L; */
R = j-1;
}
}
} /* while not done */
/* Now insertion sort on the concatenation of subfiles */
for(i=elements-2; i>=0; i--) {
if (ARR(i) > ARR(i+1)) {
piv = ARR(i);
pid = IDX(i);
j = i+1;
do {
ARR(j-1) = ARR(j);
IDX(j-1) = IDX(j);
j = j+1;
} while(j < elements && ARR(j) < piv);
ARR(j-1) = piv;
IDX(j-1) = pid;
}
}
}
static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
real rswap, piv;
unsigned char done = 0;
/* beg[0]=0; end[0]=elements; */
stack = 0;
L = 0; R = elements-1;
done = elements-1 <= M_SMALL;
while(!done) {
/* Use median of three for pivot choice */
P=(L+R)>>1;
BOTH_SWAP(P, L+1);
if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }
i = L+1; j = R; piv = ARR(L); pid = IDX(L);
do {
do { i = i+1; } while(ARR(i) > piv);
do { j = j-1; } while(ARR(j) < piv);
if (j < i)
break;
BOTH_SWAP(i, j);
} while(1);
BOTH_SWAP(L, j);
/* Left subfile is (L, j-1) */
/* Right subfile is (i, R) */
sz_left = j-L;
sz_right = R-i+1;
if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
/* both subfiles are small */
/* if stack empty */
if (stack == 0) {
done = 1;
} else {
stack--;
L = beg[stack];
R = end[stack];
}
} else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
/* exactly one of the subfiles is small */
/* (L,R) = large subfile */
if (sz_left > sz_right) {
/* Implicit: L = L; */
R = j-1;
} else {
L = i;
/* Implicit: R = R; */
}
} else {
/* none of the subfiles is small */
/* push large subfile */
/* (L,R) = small subfile */
if (sz_left > sz_right) {
beg[stack] = L;
end[stack] = j-1;
stack++;
L = i;
/* Implicit: R = R */
} else {
beg[stack] = i;
end[stack] = R;
stack++;
/* Implicit: L = L; */
R = j-1;
}
}
} /* while not done */
/* Now insertion sort on the concatenation of subfiles */
for(i=elements-2; i>=0; i--) {
if (ARR(i) < ARR(i+1)) {
piv = ARR(i);
pid = IDX(i);
j = i+1;
do {
ARR(j-1) = ARR(j);
IDX(j-1) = IDX(j);
j = j+1;
} while(j < elements && ARR(j) > piv);
ARR(j-1) = piv;
IDX(j-1) = pid;
}
}
}
#undef MAX_LEVELS
#undef M_SMALL
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
dimension+1);
THTensor_(resizeAs)(rt_, t);
THTensor_(copy)(rt_, t);
{
THLongStorage *size = THTensor_(newSizeOf)(t);
THLongTensor_resize(ri_, size, NULL);
THLongStorage_free(size);
}
if(descendingOrder)
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
}
else
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
}
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm. */
static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride)
{
long P, L, R, i, j, swap, pid;
real rswap, piv;
L = 0;
R = elements-1;
do {
if (R <= L) /* One element only */
return;
if (R == L+1) { /* Two elements only */
if (ARR(L) > ARR(R)) {
BOTH_SWAP(L, R);
}
return;
}
/* Use median of three for pivot choice */
P=(L+R)>>1;
BOTH_SWAP(P, L+1);
if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }
i = L+1;
j = R;
piv = ARR(L);
pid = IDX(L);
do {
do i++; while(ARR(i) < piv);
do j--; while(ARR(j) > piv);
if (j < i)
break;
BOTH_SWAP(i, j);
} while(1);
BOTH_SWAP(L, j);
/* Re-set active partition */
if (j <= k) L=i;
if (j >= k) R=j-1;
} while(1);
}
#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension)
{
THLongStorage *dim;
THTensor *temp_;
THLongTensor *tempi_;
real *temp__data;
long *tempi__data;
long t_size_dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
THArgCheck(k >= 0 && k < t->size[dimension], 2, "selected index out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
t_size_dim = THTensor_(size)(t, dimension);
temp_ = THTensor_(new)();
THTensor_(resize1d)(temp_, t_size_dim);
temp__data = THTensor_(data)(temp_);
tempi_ = THLongTensor_new();
THLongTensor_resize1d(tempi_, t_size_dim);
tempi__data = THLongTensor_data(tempi_);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long i;
for(i = 0; i < t_size_dim; i++)
temp__data[i] = t_data[i*t_stride];
for(i = 0; i < t_size_dim; i++)
tempi__data[i] = i;
THTensor_(quickselect)(temp__data, tempi__data, k, t_size_dim, 1);
*values__data = temp__data[k];
*indices__data = tempi__data[k];);
THTensor_(free)(temp_);
THLongTensor_free(tempi_);
}
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
long t_size_dim, k;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
t_size_dim = THTensor_(size)(t, dimension);
k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */
THTensor_(kthvalue)(values_, indices_, t, k, dimension);
}
void THTensor_(tril)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
long sz = THMin(r+k+1, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
}
}
void THTensor_(triu)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
long sz = THMin(r+k, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
}
}
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
THLongStorage *size;
int i;
int ndim = THMax(ta->nDimension, tb->nDimension);
ndim = THMax(ndim, dimension+1);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension+1);
size = THLongStorage_newWithSize(ndim);
for(i = 0; i < ndim; i++)
{
long tadi = (i < ta->nDimension ? ta->size[i] : 1);
long tbdi = (i < tb->nDimension ? tb->size[i] : 1);
if(i == dimension)
size->data[i] = tadi+tbdi;
else
{
if(tadi != tbdi)
{
THLongStorage_free(size);
THError("inconsistent tensor sizes");
}
size->data[i] = tadi;
}
}
THTensor_(resize)(r_, size, NULL);
THLongStorage_free(size);
{
THTensor *nta = THTensor_(newWithTensor)(r_);
THTensor_(narrow)(nta, NULL, dimension, 0, (dimension < ta->nDimension ? ta->size[dimension] : 1));
THTensor_(copy)(nta, ta);
THTensor_(free)(nta);
}
{
THTensor *ntb = THTensor_(newWithTensor)(r_);
THTensor_(narrow)(ntb, NULL, dimension, (dimension < ta->nDimension ? ta->size[dimension] : 1), (dimension < tb->nDimension ? tb->size[dimension] : 1));
THTensor_(copy)(ntb, tb);
THTensor_(free)(ntb);
}
}
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
{ \
THByteTensor_rawResize(r_, t->nDimension, t->size, NULL); \
THByteTensor_zero(r_); \
TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
if (*t_data OP value) *r__data = 1;); \
} \
void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
{ \
THTensor_(rawResize)(r_, t->nDimension, t->size, NULL); \
THTensor_(zero)(r_); \
TH_TENSOR_APPLY2(real, r_, real, t, \
if (*t_data OP value) *r__data = 1;); \
} \
void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THByteTensor_rawResize(r_, ta->nDimension, ta->size, NULL); \
THByteTensor_zero(r_); \
TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
if(*ta_data OP *tb_data) *r__data = 1;); \
} \
void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THTensor_(rawResize)(r_, ta->nDimension, ta->size, NULL); \
THTensor_(zero)(r_); \
TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
if(*ta_data OP *tb_data) *r__data = 1;); \
}
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
}
#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
}
#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
#endif /* long only part */
#if defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
#endif /* int only part */
#if defined(TH_REAL_IS_BYTE)
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
int THTensor_(NAME)(THTensor *tensor) \
{ \
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \
int sum = INIT_VALUE; \
TH_TENSOR_APPLY(real, tensor, sum OP *tensor_data;); \
return sum; \
}
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &=, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, |=, 0)
#endif /* Byte only part */
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
LAB_IMPLEMENT_BASIC_FUNCTION(log,log)
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p)
LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp)
LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos)
LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos)
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh)
LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin)
LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin)
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh)
LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil)
LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor)
LAB_IMPLEMENT_BASIC_FUNCTION(round,round)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs)
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data,*ty_data););
}
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum/t_size;);
}
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
});
}
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = sum2;
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sum2;
});
}
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension+1);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
if(value == 0) {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride] != 0.0;
*r__data = sum;)
} else {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += pow(fabs(t_data[i*t_stride]), value);
*r__data = pow(sum, 1.0/value);)
}
}
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else if(value == 1) {
TH_TENSOR_APPLY(real, tensor, sum += fabs(*tensor_data););
return sum;
} else if(value == 2) {
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
return sqrt(sum);
} else {
TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value););
return pow(sum, 1.0/value);
}
}
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
int i;
THTensor *rowR, *rowS;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
dimension+1);
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
THTensor_(nDimension)(src));
rowR = THTensor_(new)();
rowS = THTensor_(new)();
THTensor_(resizeAs)(res, src);
for (i=0; i<src->size[dimension]; i++)
{
real norm = 0;
real new_norm;
THTensor_(select)(rowS, src, dimension, i);
THTensor_(select)(rowR, res, dimension, i);
if (value == 1) {
TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
} else if (value == 2) {
TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
} else {
TH_TENSOR_APPLY(real, rowS, norm += pow(fabs(*rowS_data), value););
}
norm = pow(norm, 1/value);
if (norm > maxnorm)
{
new_norm = maxnorm / (norm + 1e-7);
TH_TENSOR_APPLY2(
real, rowR, real, rowS,
*rowR_data = (*rowS_data) * new_norm;
)
}
else
THTensor_(copy)(rowR, rowS);
}
THTensor_(free)(rowR);
THTensor_(free)(rowS);
}
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
real sum = 0;
TH_TENSOR_APPLY2(real, tensor, real, src,
sum += pow(fabs(*tensor_data - *src_data), value);)
return pow(sum, 1.0/value);
}
accreal THTensor_(meanall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}
accreal THTensor_(varall)(THTensor *tensor)
{
accreal mean = THTensor_(meanall)(tensor);
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
sum /= (THTensor_(nElement)(tensor)-1);
return sum;
}
accreal THTensor_(stdall)(THTensor *tensor)
{
return sqrt(THTensor_(varall)(tensor));
}
void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
THArgCheck(a <= b, 2, "end range should be greater than start range");
THTensor_(resize1d)(r_, n);
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = a;
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + i*(b-a)/((real)(n-1));
i++;
);
}
}
void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
THArgCheck(a <= b, 2, "end range should be greater than start range");
THTensor_(resize1d)(r_, n);
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a);
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, _generator, 0, 1);
}
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, _generator, 0, 1);
}
void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
THTensor *clone;
real minval;
real maxval;
real bins;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
/* if no explicit range was given (minvalue == maxvalue), use the data's own min/max */
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
/* guard against a degenerate range where every value is identical */
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
/* map each value to a 1-based bin index, roughly floor((x - minval) / (maxval - minval) * nbins) + 1;
   the 1e-6 keeps x == maxval inside the last bin */
bins = (real)(nbins)-1e-6;
clone = THTensor_(newWithSize1d)(THTensor_(nElement)(tensor));
THTensor_(copy)(clone,tensor);
THTensor_(add)(clone, clone, -minval);
THTensor_(div)(clone, clone, (maxval-minval));
THTensor_(mul)(clone, clone, bins);
THTensor_(floor)(clone, clone);
THTensor_(add)(clone, clone, 1);
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, clone, \
if ((*clone_data <= nbins) && (*clone_data >= 1)) { \
*(h_data + (int)(*clone_data) - 1) += 1; \
});
THTensor_(free)(clone);
}
#endif /* floating point only part */
#endif
|
DRB095-doall2-taskloop-orig-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
Only one loop is associated with omp taskloop.
The inner loop's loop iteration variable will be shared if it is shared in the enclosing context.
Data race pairs (we allow multiple ones to preserve the pattern):
Write_set = {j@69:14, j@69:30}
Read_set = {j@69:21, j@69:30, j@70:16}
Any pair from Write_set vs. Write_set and Write_set vs. Read_set is a data race pair.
*/
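/*
For reference only (illustrative, not part of the benchmark): the race
disappears once the inner loop variable is private to each task, e.g. by
declaring it inside the loop or by collapsing both loops:
    #pragma omp taskloop collapse(2)
    for (int ii = 0; ii < 100; ii++)
      for (int jj = 0; jj < 100; jj++)
        a[ii][jj] += 1;
The code below keeps j shared on purpose to exhibit the race.
*/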
#if (_OPENMP<201511)
#error "An OpenMP 4.5 compiler is needed to compile this test."
#endif
#include <stdio.h>
int a[100][100];
int main()
{
int i, j;
#pragma omp parallel
{
#pragma omp single
{
#pragma omp taskloop
for (i = 0; i < 100; i++)
for (j = 0; j < 100; j++)
a[i][j]+=1;
}
}
printf ("a[50][50]=%d\n", a[50][50]);
return 0;
}
|
ising.h
|
#ifndef _ISING_H
#define _ISING_H
#include <iostream>
#include <tuple>
#include <array>
#include <vector>
#include <bitset>
#include <algorithm>
#include <random>
#include <string>
#include <memory>
#include <limits>
#include <type_traits>
#include <cmath> // sqrt/log used in randomize()
#ifdef _OPENMP
#include <valarray>
#include <omp.h>
#endif
#include "asa.h"
#include "arithmeticvector.h"
namespace ising{
template<size_t N> double isingEnergy (void* state);
template<size_t N> double isingMeasure(void* stateI,
void* stateJ);
template<size_t N> void isingStep (const gsl_rng* random,
void* state,
double step);
template<size_t N> void isingPrint (void* state);
template<size_t N, class Model>
struct LatticeType{
LatticeType() = delete;
LatticeType(const LatticeType& rhs);
LatticeType& operator=(const LatticeType& rhs);
LatticeType(std::shared_ptr<std::mt19937>& _randomEngine,
std::shared_ptr<std::uniform_int_distribution
<unsigned long long int>>& _uniform,
const Model* _model,
std::bitset<N>* _nodes = nullptr);
std::shared_ptr<std::mt19937> randomEngine;
std::shared_ptr<std::uniform_int_distribution
<unsigned long long int>> uniform;
const Model* model;
std::bitset<N> nodes;
size_t flipper;
};
class AbstractIsingModel{
public:
virtual ~AbstractIsingModel() = 0;
};
#ifdef _OPENMP
//template<size_t N, size_t numOfThreads> // N-site lattice
template<size_t N> // N-site lattice
#else
template<size_t N> // N-site lattice
#endif
class IsingModel : public AbstractIsingModel{
public:
IsingModel(bool QUICK_START=true):
randomDevice(),
randomEngine(std::make_shared<std::mt19937>(randomDevice())),
uniform(std::make_shared<
std::uniform_int_distribution<
unsigned long long int>>(
std::numeric_limits<
unsigned long long int>::min(),
std::numeric_limits<
unsigned long long int>::max())),
lattice(randomEngine,uniform,this){
if(QUICK_START) hamiltonian.reserve(N*N/2); // maximum number of interactions (each site with all others)
else hamiltonian.reserve(N); // minimum number of interactions (one per site)
solver.set_energy ( isingEnergy<N> );
solver.set_measure( isingMeasure<N> );
solver.set_step ( isingStep<N> );
#ifdef _VERBOSE
solver.set_print ( isingPrint<N> );
#endif
#ifdef _OPENMP
// energy = std::valarray<double>(0.0,numOfThreads);
// probablity = std::valarray<double>(0.0,numOfThreads);
#endif
}
~IsingModel() override {}
typedef std::tuple<unsigned,unsigned,double> TwoSiteInteraction;
typedef std::vector<TwoSiteInteraction> HamiltonianType;
typedef celerium::ArithmeticVector VectorType;
protected:
std::random_device randomDevice;
std::shared_ptr<std::mt19937> randomEngine;
std::shared_ptr<std::uniform_int_distribution
<unsigned long long int>> uniform;
LatticeType<N,IsingModel> lattice;
gsl::SimulatedAnnealing solver;
HamiltonianType hamiltonian;
std::array<VectorType,3> basis;
std::vector<std::tuple<size_t,VectorType,double>> supercell;
size_t referencePoint;
const std::bitset<N>* mask;
#ifdef _OPENMP
// std::valarray<double> energy;
// std::valarray<double> probablity;
#endif
public:
typename gsl::SimulatedAnnealing::Parameters&
set_parameters(const typename gsl::SimulatedAnnealing::Parameters& _params){
return solver.set_parameters(_params);
}
void set_basis(const std::array<VectorType,3>& _basis){
basis = _basis;
}
void set_supercell(const std::vector<std::tuple<size_t,VectorType,double>>& _supercell){
supercell = _supercell;
}
void set_reference(size_t _reference){
referencePoint = _reference;
}
static unsigned randomize(std::bitset<N>& state,
std::shared_ptr<std::mt19937>& randomEngine,
std::shared_ptr<
std::uniform_int_distribution
<unsigned long long int>>& uniform,
const std::bitset<N>* mask=nullptr){
unsigned ones = (*uniform)(*randomEngine)%static_cast<size_t>(sqrt(N));
unsigned zeros = N - ones;
std::string face(ones,'1');
face += std::string(zeros,'0');
std::shuffle(face.begin(),face.end(),*randomEngine);
state ^= std::bitset<N>(face);
if(mask != nullptr) state &= *mask; // if a mask exists, keep only the spins it allows (others are cleared)
return 0;
}
static unsigned randomize(std::bitset<N>& state,
std::mt19937& randomEngine,
size_t maxNumberOfFlips=
static_cast<size_t>(1.0+2.0*log(N)),
const std::bitset<N>* mask=nullptr){
unsigned ones = std::min(maxNumberOfFlips,N);
unsigned zeros = N - ones;
std::string face(ones,'1');
face += std::string(zeros,'0');
std::shuffle(face.begin(),face.end(),randomEngine);
state ^= std::bitset<N>(face);
if(mask != nullptr) state &= *mask; // if a mask exists, keep only the spins it allows (others are cleared)
return 0;
}
const std::bitset<N>* get_mask() const{
return mask;
}
protected:
// For SFINAE compile-time evaluation
template<class T>
T tester(T t)const{
if(std::is_integral<T>::value) return static_cast<unsigned>(t);
return t;
}
public:
void add_interaction(...){
std::cerr<<"Wrong input for ising::IsingModel::add_interaction:"<<std::endl;
std::cerr<<" Either non-iterable or iterable of non <int,int,float> tuples."<<std::endl;
std::cerr<<" Nothing was added!"<<std::endl;
}
template<class intlike, class floatlike>
auto add_interaction(intlike i, intlike j, floatlike J) -> decltype((unsigned)(tester<intlike>)(i),void()){
hamiltonian.push_back(std::make_tuple(i,j,J));
}
template<class T>
auto add_interaction(T interaction) -> decltype((TwoSiteInteraction&)(tester<T>)(interaction),void()){
hamiltonian.push_back(interaction);
}
template<class Iterable>
auto add_interaction(const Iterable& interactions) -> decltype((decltype(interactions.begin()))(std::begin)(interactions),void()){
for(auto& interaction : interactions)
hamiltonian.push_back(interaction);
}
std::bitset<N>* get_nodes_ptr(){
return &(lattice.nodes);
}
void clear_hamiltonian(){
hamiltonian.clear();
}
void reset(){
this->clear_hamiltonian();
this->randomize_state();
}
void randomize_state(){
IsingModel<N>::generate_state(lattice.nodes,*(lattice.randomEngine),*(lattice.uniform));
}
const std::bitset<N>& get_nodes() const{
return lattice.nodes;
}
const HamiltonianType& get_hamiltonian() const{
return hamiltonian;
}
std::bitset<N>& get_nodes(){
return lattice.nodes;
}
friend std::ostream& operator<<(std::ostream& stream, const IsingModel& model){
return stream<<model.get_nodes();
}
static double energy(const LatticeType<N,IsingModel>* state){
double E = 0.0;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp for reduction(+:E)
#endif
for(size_t i = 0U; i < state->model->get_hamiltonian().size(); ++i){
E += std::get<2>(state->model->get_hamiltonian()[i])
*(state->nodes[std::get<0>(state->model->get_hamiltonian()[i])]-0.5)
*(state->nodes[std::get<1>(state->model->get_hamiltonian()[i])]-0.5);
}
#ifdef _OPENMP
}
#endif
return E;
}
static double measure(const std::bitset<N>& stateI, const std::bitset<N>& stateJ){
std::bitset<N> output = ~stateI & stateJ;
return output.count();
}
std::bitset<N> run(){
this->mask = nullptr;
#ifdef _VERBOSE
std::cout<<"Starting from: "<<lattice.nodes<<std::endl;
#endif
solver.run<LatticeType<N,IsingModel<N>>>(lattice,sizeof(lattice));
#ifdef _VERBOSE
std::cout<<"Solution: ";
std::cout<<lattice.nodes<<std::endl;
#endif
return lattice.nodes;
}
#ifdef _VERBOSE
std::bitset<N> run(std::bitset<N>* mask){
this->mask = mask;
lattice.nodes &= *mask;
std::cout<<"Starting from: "<<lattice.nodes<<std::endl;
solver.run<LatticeType<N,IsingModel<N>>>(lattice,sizeof(lattice));
std::cout<<"Solution: ";
std::cout<<lattice.nodes<<std::endl;
return lattice.nodes;
}
#else
std::bitset<N> run(std::bitset<N>* mask){
this->mask = mask;
lattice.nodes &= *mask;
solver.run<LatticeType<N,IsingModel<N>>>(lattice,sizeof(lattice));
return lattice.nodes;
}
#endif
static void generate_state(std::bitset<N>& state,
std::mt19937& engine,
std::uniform_int_distribution
<unsigned long long int>& distribution){
constexpr auto seedSize = 8*sizeof(unsigned long long int);
state = std::bitset<N>(distribution(engine));
auto currentSize = seedSize;
while (currentSize < N){
state <<= seedSize;
state |= std::bitset<N>(distribution(engine));
currentSize += seedSize;
}
}
}; // end of class IsingModel
template<size_t N>
double isingEnergy (void* state){
return IsingModel<N>::energy(static_cast<LatticeType<N,IsingModel<N>>*>(state));
}
template<size_t N>
double isingMeasure(void* stateI, void* stateJ){
return IsingModel<N>::measure(
static_cast<LatticeType<N,IsingModel<N>>*>(stateI)->nodes,
static_cast<LatticeType<N,IsingModel<N>>*>(stateJ)->nodes
);
}
template<size_t N>
void isingStep (const gsl_rng* random __attribute__((unused)), void* state, double step __attribute__((unused))){
IsingModel<N>::randomize(
static_cast<LatticeType<N,IsingModel<N>>*>(state)->nodes,
static_cast<LatticeType<N,IsingModel<N>>*>(state)->randomEngine,
static_cast<LatticeType<N,IsingModel<N>>*>(state)->uniform,
static_cast<LatticeType<N,IsingModel<N>>*>(state)->model->get_mask()
);
}
template<size_t N>
void isingPrint (void* state){
#ifndef _QUIET
std::cout<<'\t'<<static_cast<LatticeType<N,IsingModel<N>>*>(state)->nodes;
#endif
}
template<size_t N, class Model>
LatticeType<N,Model>::LatticeType(const LatticeType<N,Model>& rhs){
randomEngine = rhs.randomEngine;
uniform = rhs.uniform;
nodes = rhs.nodes;
model = rhs.model;
}
template<size_t N, class Model>
LatticeType<N,Model>& LatticeType<N,Model>::operator=(const LatticeType<N,Model>& rhs){
randomEngine = rhs.randomEngine;
uniform = rhs.uniform;
nodes = rhs.nodes;
model = rhs.model;
return *this;
}
template<size_t N, class Model>
LatticeType<N,Model>::LatticeType(
std::shared_ptr<std::mt19937>& _randomEngine,
std::shared_ptr<
std::uniform_int_distribution
<unsigned long long int>>& _uniform,
const Model* _model,
std::bitset<N>* _nodes):randomEngine(_randomEngine),
uniform(_uniform),
model(_model){
if (_nodes == nullptr)
IsingModel<N>::generate_state(nodes,*_randomEngine,*_uniform);
else
nodes = *_nodes;
}
} //end of namespace ising
#endif
|
dng_simd_type.h
|
/*****************************************************************************/
// Copyright 2017-2019 Adobe Systems Incorporated
// All Rights Reserved.
//
// NOTICE: Adobe permits you to use, modify, and distribute this file in
// accordance with the terms of the Adobe license agreement accompanying it.
/*****************************************************************************/
#ifndef __dng_simd_type__
#define __dng_simd_type__
/*****************************************************************************/
#include "dng_flags.h"
/*****************************************************************************/
#if qDNGIntelCompiler
#include <immintrin.h>
#endif // qDNGIntelCompiler
/*****************************************************************************/
enum SIMDType
{
Scalar,
SSE2, // Pentium 4
AVX, // Sandy Bridge
AVX2, // Haswell
F16C = AVX2, // Ivy Bridge
AVX512_SKX, // Skylake Server
SIMD_Sentinel
};
/*****************************************************************************/
template <int SIMDType>
class SIMDTraits
{
public:
static const int kVecSizeFloat = 1;
static const int kVecSizeInt32 = 1;
};
template <>
class SIMDTraits<SSE2>
{
public:
static const int kVecSizeFloat = 4;
static const int kVecSizeInt32 = 4;
};
template <>
class SIMDTraits<AVX>
{
public:
static const int kVecSizeFloat = 8;
static const int kVecSizeInt32 = 4;
};
template <>
class SIMDTraits<AVX2>
{
public:
static const int kVecSizeFloat = 8;
static const int kVecSizeInt32 = 8;
};
template <>
class SIMDTraits<AVX512_SKX>
{
public:
static const int kVecSizeFloat = 16;
static const int kVecSizeInt32 = 16;
};
const SIMDType SIMDTypeMaxValue = SIMDType(SIMD_Sentinel - 1);
extern SIMDType gDNGMaxSIMD;
/*****************************************************************************/
#if qDNGIntelCompiler
// Intel compiler.
// Macros are preferred for "#pragma simd" because at some point these will
// all change to OpenMP 4.x compliant "#pragma omp simd" directives (no longer
// Intel-specific).
//
// Note that _Pragma(x) requires C99 or C++11 support.
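//
// Usage sketch (illustrative only; the real call sites live in the DNG
// source files, not in this header). A per-SIMD-level kernel would
// typically place the vector-length pragma directly above its inner loop:
//
//      template <SIMDType simd>
//      void AddFloat (const float *a, const float *b, float *dst, int count)
//          {
//          SET_CPU_FEATURE (simd) ;
//          INTEL_PRAGMA_SIMD_ASSERT_VECLEN_FLOAT (simd)
//          for (int j = 0; j < count; j++)
//              dst [j] = a [j] + b [j];
//          }
//
// On non-Intel compilers these macros expand to nothing (aside from the
// optional build note), so the same source compiles as plain scalar code.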
// Pre-defined feature levels.
#define CR_SIMD_MIN_FEATURE (_FEATURE_SSE2)
#define CR_AVX_FEATURE (_FEATURE_AVX)
#define CR_AVX2_FEATURE (_FEATURE_AVX|_FEATURE_FMA|_FEATURE_AVX2)
#define CR_F16C_FEATURE CR_AVX2_FEATURE
#define CR_AVX512_SKX_FEATURE (_FEATURE_AVX512F|_FEATURE_AVX512CD|_FEATURE_AVX512BW|_FEATURE_AVX512DQ|_FEATURE_AVX512VL)
#define CR_COMPILER_USING_AVX512_SKX (__AVX512F__ && __AVX512VL__ && __AVX512BW__ && __AVX512DQ__ && __AVX512CD__)
#define __SIMDTYPE_TFY(x) #x
#define _SIMDTYPE_TFY(x) __SIMDTYPE_TFY(x)
#if qDNGDebug
// Debug build.
//#define INTEL_PRAGMA_SIMD_ASSERT_C(clause) _Pragma(PM2__STR1__(simd clause))
#define INTEL_PRAGMA_SIMD_ASSERT _Pragma("simd")
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_FLOAT(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeFloat ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT32(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeInt32 ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT16(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeInt32*2 ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT8(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeInt32*4 ) ))
#else
// Release build.
//#define INTEL_PRAGMA_SIMD_ASSERT_C(clause) _Pragma(PM2__STR1__(simd assert clause))
#define INTEL_PRAGMA_SIMD_ASSERT _Pragma("simd assert")
#if 1
#if (__INTEL_COMPILER < 1800)
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_FLOAT(s) _Pragma(_SIMDTYPE_TFY(simd assert vectorlength( SIMDTraits<s>::kVecSizeFloat ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT32(s) _Pragma(_SIMDTYPE_TFY(simd assert vectorlength( SIMDTraits<s>::kVecSizeInt32 ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT16(s) _Pragma(_SIMDTYPE_TFY(simd assert vectorlength( SIMDTraits<s>::kVecSizeInt32*2 ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT8(s) _Pragma(_SIMDTYPE_TFY(simd assert vectorlength( SIMDTraits<s>::kVecSizeInt32*4 ) ))
#else
// FIX_ME_ERIC_CHAN: I removed the assert to fix compile time error when using Intel compiler version 18.
// Need to figure out correct fix when we switch to newer version. - tknoll 10/30/2017.
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_FLOAT(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeFloat ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT32(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeInt32 ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT16(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeInt32*2 ) ))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT8(s) _Pragma(_SIMDTYPE_TFY(simd vectorlength( SIMDTraits<s>::kVecSizeInt32*4 ) ))
#endif
#else
// Don't force
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_FLOAT(s) _Pragma(_SIMDTYPE_TFY(simd assert))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT32(s) _Pragma(_SIMDTYPE_TFY(simd assert))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT16(s) _Pragma(_SIMDTYPE_TFY(simd assert))
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT8(s) _Pragma(_SIMDTYPE_TFY(simd assert))
#endif
#endif
#define SET_CPU_FEATURE(simd) _allow_cpu_features( (simd >= AVX512_SKX) ? CR_AVX512_SKX_FEATURE : (simd >= AVX2) ? CR_AVX2_FEATURE : ((simd >= AVX) ? CR_AVX_FEATURE : CR_SIMD_MIN_FEATURE) )
//#define SET_CPU_FEATURE_NOFMA(simd) _allow_cpu_features( ((simd >= AVX512_SKX) ? CR_AVX512_SKX_FEATURE : (simd >= AVX2) ? CR_AVX2_FEATURE : ((simd >= AVX) ? CR_AVX_FEATURE : CR_SIMD_MIN_FEATURE)) & ~_FEATURE_FMA )
#define SET_CPU_FEATURE_NOFMA(simd) _allow_cpu_features( (simd >= AVX) ? CR_AVX_FEATURE : CR_SIMD_MIN_FEATURE)
#define INTEL_PRAGMA_NOVECTOR _Pragma("novector")
#define INTEL_COMPILER_NEEDED_NOTE
#else
// Non-Intel compiler. Use empty definitions for the macros.
// Credit: http://www.highprogrammer.com/alan/windev/visualstudio.html, but avoid using $ character
#define Stringize( L ) #L
#define MakeString( M, L ) M(L)
#define _x_Line MakeString( Stringize, __LINE__ )
#if qDNGValidateTarget
// Do not warn about Intel compiler if building dng_validate.
#define INTEL_COMPILER_NEEDED_NOTE
#else
#if !(defined (IOS_ENV) || defined(ANDROID_ENV)) && (defined(__x86_64__) || defined(__i386__))
#ifndef _MSC_VER
#define INTEL_COMPILER_NEEDED_NOTE _Pragma("message(\"NOTE: Intel Compiler needed for optimizations in \" __FILE__ \":\" _x_Line )")
#else
// Intel compiler understands C99 _Pragma, but not Microsoft, so use MS-specific __pragma instead
#define INTEL_COMPILER_NEEDED_NOTE __pragma(message("NOTE: Intel Compiler needed for optimizations in " __FILE__ ":" _x_Line " in " __FUNCTION__))
#endif
#else
#define INTEL_COMPILER_NEEDED_NOTE
#endif
#endif
#define INTEL_PRAGMA_SIMD_ASSERT
//#define INTEL_PRAGMA_SIMD_ASSERT_C(clause)
#define SET_CPU_FEATURE(simd)
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_FLOAT(simd)
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT16(simd)
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT32(simd)
#define INTEL_PRAGMA_SIMD_ASSERT_VECLEN_INT8(simd)
#define INTEL_PRAGMA_NOVECTOR
#endif // qDNGIntelCompiler
/*****************************************************************************/
#endif // __dng_simd_type__
/*****************************************************************************/
|
GB_Scalar_extractElement.c
|
//------------------------------------------------------------------------------
// GB_Scalar_extractElement_template: x = S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Extract the value of a single scalar, x = S, typecasting from the
// type of S to the type of x, as needed.
// Returns GrB_SUCCESS if the GrB_Scalar entry is present, and sets x to its
// value. Returns GrB_NO_VALUE if the GrB_Scalar is not present, and x is
// unmodified.
// This template constructs GrB_Scalar_extractElement_[TYPE] for each of the
// 13 built-in types, and the _UDT method for all user-defined types.
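// For orientation, a typical call from application code looks like this
// (sketch only; it uses the standard GraphBLAS user API, nothing defined
// in this file):
//
//      GrB_Scalar s ;
//      GrB_Scalar_new (&s, GrB_FP64) ;
//      GrB_Scalar_setElement_FP64 (s, 3.14) ;
//      double x = 0 ;
//      GrB_Info info = GrB_Scalar_extractElement_FP64 (&x, s) ;
//      // info == GrB_SUCCESS and x == 3.14 ; if s held no entry,
//      // info would be GrB_NO_VALUE and x would remain 0.
//      GrB_Scalar_free (&s) ;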
GrB_Info GB_EXTRACT_ELEMENT // extract a single entry from S
(
GB_XTYPE *x, // scalar to extract, not modified if not found
const GrB_Scalar S // GrB_Scalar to extract a scalar from
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_RETURN_IF_NULL_OR_FAULTY (S) ;
GB_RETURN_IF_NULL (x) ;
// delete any lingering zombies, assemble any pending tuples, and unjumble
if (GB_ANY_PENDING_WORK (S))
{
// extract scalar with pending tuples or zombies. It cannot be
// actually jumbled, but S->jumbled might be true anyway.
GrB_Info info ;
GB_WHERE1 (GB_WHERE_STRING) ;
GB_BURBLE_START ("GrB_Scalar_extractElement") ;
GB_OK (GB_wait ((GrB_Matrix) S, "s", Context)) ;
GB_BURBLE_END ;
}
ASSERT (!GB_ANY_PENDING_WORK (S)) ;
// GB_XCODE and S must be compatible
GB_Type_code scode = S->type->code ;
if (!GB_code_compatible (GB_XCODE, scode))
{
return (GrB_DOMAIN_MISMATCH) ;
}
if (GB_nnz ((GrB_Matrix) S) == 0 // empty
|| (S->p != NULL && S->p [1] == 0) // sparse/hyper with no entry
|| (S->b != NULL && S->b [0] == 0)) // bitmap with no entry
{
// quick return
return (GrB_NO_VALUE) ;
}
//--------------------------------------------------------------------------
// extract the scalar
//--------------------------------------------------------------------------
#if !defined ( GB_UDT_EXTRACT )
if (GB_XCODE == scode)
{
// copy S into x, no typecasting, for built-in types only.
GB_XTYPE *restrict Sx = ((GB_XTYPE *) (S->x)) ;
(*x) = Sx [0] ;
}
else
#endif
{
// typecast S into x
GB_cast_scalar (x, GB_XCODE, S->x, scode, S->type->size) ;
}
#pragma omp flush
return (GrB_SUCCESS) ;
}
#undef GB_UDT_EXTRACT
#undef GB_EXTRACT_ELEMENT
#undef GB_XTYPE
#undef GB_XCODE
|
infectious_test.c
|
/* For license: see LICENSE.txt file at top-level */
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <shmem.h>
#include <math.h>
#include <string.h>
#include <hoover.h>
#ifdef MULTITHREADED
#include <omp.h>
#include <shmemx.h>
#endif
#define TIME_STEP 0
#define ACTOR_ID 1
#define PX 2
#define PY 3
#define INFECTED 4
#define DST_X 5
#define DST_Y 6
#define NEXT_CREATED 7
#define NEXT_ID 8
#define PREV_IS_INFECTED 9
#define PREV_PX 10
#define PREV_PY 11
#define PORTAL_CAPTURE_RADIUS 5.0
#define PE_ROW(this_pe) ((this_pe) / n_cells_x)
#define PE_COL(this_pe) ((this_pe) % n_cells_x)
#define PE_ROW_CELL_START(this_pe) ((double)PE_ROW(this_pe) * cell_dim_y)
#define PE_COL_CELL_START(this_pe) ((double)PE_COL(this_pe) * cell_dim_x)
#define CELL_ROW(y_coord) ((y_coord) / cell_dim_y)
#define CELL_COL(x_coord) ((x_coord) / cell_dim_x)
#define CELL_INDEX(cell_row, cell_col) ((cell_row) * n_cells_x + (cell_col))
int max_modeled_timestep = 0;
size_t n_local_actors = 0;
uint64_t total_n_actors = 0;
static unsigned n_time_partition = 0;
static unsigned n_y_partition = 0;
static unsigned n_x_partition = 0;
#ifdef MULTITHREADED
static int nthreads = 1;
#endif
typedef struct _portal_t {
int pes[2];
struct {
double x, y;
} locations[2];
} portal_t;
static int npes, pe;
static int n_cells_y, n_cells_x;
static double cell_dim_y;
static double cell_dim_x;
static double infection_radius;
static int max_num_timesteps;
/*
* Construct a 2D grid, with one grid cell per PE. Build connections between
* cells, having each cell connected to all eight neighbor cells plus some
* randomly added long distance interactions.
*
* Inside of each cell of the 2D grid, generate a random number of actors that
* perform random walks on the whole grid (possibly jumping to other cells) and
* infect each other if within certain distances.
*/
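/*
 * Worked example (illustrative numbers, not from the benchmark inputs):
 * with n_cells_x = 4 and cell_dim_x = cell_dim_y = 10.0, PE 6 maps to grid
 * row PE_ROW(6) = 6 / 4 = 1 and column PE_COL(6) = 6 % 4 = 2, so it owns
 * the cell spanning x in [20.0, 30.0) and y in [10.0, 20.0). Actors read
 * from the input file are assigned to whichever PE owns their starting
 * (px, py) position.
 */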
long long p_wrk[SHMEM_REDUCE_MIN_WRKDATA_SIZE];
int int_p_wrk[SHMEM_REDUCE_MIN_WRKDATA_SIZE];
long p_sync[SHMEM_REDUCE_SYNC_SIZE];
static double distance(double x1, double y1, double x2, double y2) {
double deltax = x2 - x1;
double deltay = y2 - y1;
return sqrt((deltay * deltay) + (deltax * deltax));
}
hvr_edge_type_t should_have_edge(const hvr_vertex_t *base,
const hvr_vertex_t *neighbor, hvr_ctx_t ctx) {
int base_time = (int)hvr_vertex_get(TIME_STEP, base, ctx);
int neighbor_time = (int)hvr_vertex_get(TIME_STEP, neighbor, ctx);
if (abs(base_time - neighbor_time) == 1) {
double deltax = hvr_vertex_get(PREV_PX, neighbor, ctx) -
hvr_vertex_get(PREV_PX, base, ctx);
double deltay = hvr_vertex_get(PREV_PY, neighbor, ctx) -
hvr_vertex_get(PREV_PY, base, ctx);
if (sqrt(deltax * deltax + deltay * deltay) <= infection_radius) {
return (base_time < neighbor_time) ? DIRECTED_OUT : DIRECTED_IN;
}
}
return NO_EDGE;
}
hvr_partition_t actor_to_partition(const hvr_vertex_t *actor, hvr_ctx_t ctx) {
const double timestep = hvr_vertex_get(TIME_STEP, actor, ctx);
const double y = hvr_vertex_get(PREV_PY, actor, ctx);
const double x = hvr_vertex_get(PREV_PX, actor, ctx);
const double global_y_dim = (double)n_cells_y * cell_dim_y;
const double global_x_dim = (double)n_cells_x * cell_dim_x;
assert((int)timestep < max_num_timesteps);
assert(x < global_x_dim);
assert(y < global_y_dim);
const double partition_time_step = (double)max_num_timesteps /
(double)n_time_partition;
const double partition_y_dim = global_y_dim / (double)n_y_partition;
const double partition_x_dim = global_x_dim / (double)n_x_partition;
const hvr_partition_t time_step_partition = (hvr_partition_t)(timestep /
partition_time_step);
const hvr_partition_t y_partition = (hvr_partition_t)(y / partition_y_dim);
const hvr_partition_t x_partition = (hvr_partition_t)(x / partition_x_dim);
assert(time_step_partition < n_time_partition);
assert(x_partition < n_x_partition);
assert(y_partition < n_y_partition);
return time_step_partition * n_y_partition * n_x_partition +
y_partition * n_x_partition + x_partition;
}
static void compute_next_pos(double p_x, double p_y,
double dst_x, double dst_y,
double *next_p_x, double *next_p_y) {
if (fabs(p_x - dst_x) < 1e-9 || fabs(p_y - dst_y) < 1e-9) {
/*
* Seem to have reached destination, set new destination and start
* moving there.
*/
p_x = dst_x;
p_y = dst_y;
}
double vx = dst_x - p_x;
double vy = dst_y - p_y;
const double mag = 5.0 * distance(p_x, p_y, dst_x, dst_y);
const double normalized_vx = vx / mag;
const double normalized_vy = vy / mag;
if (fabs(vx) > fabs(normalized_vx)) vx = normalized_vx;
if (fabs(vy) > fabs(normalized_vy)) vy = normalized_vy;
double new_x = p_x + vx;
double new_y = p_y + vy;
const double global_x_dim = (double)n_cells_x * cell_dim_x;
const double global_y_dim = (double)n_cells_y * cell_dim_y;
if (new_x >= global_x_dim) new_x -= global_x_dim;
if (new_y >= global_y_dim) new_y -= global_y_dim;
if (new_x < 0.0) new_x += global_x_dim;
if (new_y < 0.0) new_y += global_y_dim;
assert(new_x >= 0.0 && new_x < global_x_dim);
assert(new_y >= 0.0 && new_y < global_y_dim);
*next_p_x = new_x;
*next_p_y = new_y;
}
void update_vertex(hvr_vertex_t *vertex, hvr_set_t *couple_with,
hvr_ctx_t ctx) {
const unsigned actor_id = (unsigned)hvr_vertex_get(ACTOR_ID, vertex, ctx);
const unsigned timestep = (unsigned)hvr_vertex_get(TIME_STEP, vertex, ctx);
hvr_vertex_t **verts;
hvr_edge_type_t *dirs;
int n_neighbors = hvr_get_neighbors(vertex, &verts, &dirs, ctx);
hvr_vertex_t prev;
int have_msg = hvr_poll_msg(vertex, &prev, ctx);
if (have_msg) {
// Messages are sorted most recent to least recent
assert((int)hvr_vertex_get(TIME_STEP, &prev, ctx) == timestep - 1);
assert((int)hvr_vertex_get(ACTOR_ID, &prev, ctx) == actor_id);
assert(hvr_vertex_get(PX, &prev, ctx) ==
hvr_vertex_get(PREV_PX, vertex, ctx));
assert(hvr_vertex_get(PY, &prev, ctx) ==
hvr_vertex_get(PREV_PY, vertex, ctx));
hvr_vertex_set(PREV_IS_INFECTED, hvr_vertex_get(INFECTED, &prev, ctx),
vertex, ctx);
// Flush less recent messages to this vertex
do {
have_msg = hvr_poll_msg(vertex, &prev, ctx);
} while (have_msg);
}
int is_infected = (int)hvr_vertex_get(PREV_IS_INFECTED, vertex, ctx);
for (int i = 0; i < n_neighbors && !is_infected; i++) {
if (dirs[i] == DIRECTED_IN) {
hvr_vertex_t *neighbor = verts[i];
assert((int)hvr_vertex_get(TIME_STEP, neighbor, ctx) ==
timestep - 1);
if (hvr_vertex_get(INFECTED, neighbor, ctx)) {
const int infected_by = hvr_vertex_get_owning_pe(neighbor);
hvr_set_insert(infected_by, couple_with);
is_infected = 1;
}
}
}
hvr_vertex_set(INFECTED, is_infected, vertex, ctx);
if (timestep < max_num_timesteps - 1) {
if (hvr_vertex_get(NEXT_CREATED, vertex, ctx) == 0) {
// Add a next
hvr_vertex_t *next = hvr_vertex_create(ctx);
double x = hvr_vertex_get(PX, vertex, ctx);
double y = hvr_vertex_get(PY, vertex, ctx);
double dst_x = hvr_vertex_get(DST_X, vertex, ctx);
double dst_y = hvr_vertex_get(DST_Y, vertex, ctx);
int next_timestep = timestep + 1;
if (next_timestep > max_modeled_timestep) {
max_modeled_timestep = next_timestep;
}
double new_x, new_y;
compute_next_pos(x, y, dst_x, dst_y, &new_x, &new_y);
hvr_vertex_set(TIME_STEP, next_timestep, next, ctx);
hvr_vertex_set(ACTOR_ID, actor_id, next, ctx);
hvr_vertex_set(PX, new_x, next, ctx);
hvr_vertex_set(PY, new_y, next, ctx);
hvr_vertex_set(INFECTED, is_infected, next, ctx);
hvr_vertex_set(DST_X, dst_x, next, ctx);
hvr_vertex_set(DST_Y, dst_y, next, ctx);
hvr_vertex_set(NEXT_CREATED, 0, next, ctx);
hvr_vertex_set(NEXT_ID, 0, next, ctx);
hvr_vertex_set(PREV_IS_INFECTED, is_infected, next, ctx);
hvr_vertex_set(PREV_PX, x, next, ctx);
hvr_vertex_set(PREV_PY, y, next, ctx);
hvr_vertex_set(NEXT_CREATED, 1, vertex, ctx);
hvr_vertex_set_uint64(NEXT_ID, next->id, vertex, ctx);
}
hvr_send_msg(hvr_vertex_get_uint64(NEXT_ID, vertex, ctx), vertex, ctx);
}
}
/*
* Callback used to check if this PE might interact with another PE.
*
* If partition is neighboring any partition in partitions, they might
* interact.
*/
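/*
 * Numerical sketch (illustrative): with n_y_partition = n_x_partition = 10
 * over a 100.0 x 100.0 grid, each spatial partition is 10.0 x 10.0. With
 * infection_radius = 2.0, padding a partition's bounding box by 2.0 on each
 * side reaches at most the directly adjacent partitions, so a partition is
 * reported as interacting with up to 3 x 3 spatial partitions in each of
 * the previous, same, and next time partitions (one time partition per
 * timestep, as configured in main).
 */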
void might_interact(const hvr_partition_t partition,
hvr_partition_t *interacting_partitions,
unsigned *n_interacting_partitions,
unsigned interacting_partitions_capacity,
hvr_ctx_t ctx) {
assert(partition != HVR_INVALID_PARTITION);
// The global dimensions of the full simulation space
const double global_x_dim = (double)n_cells_x * cell_dim_x;
const double global_y_dim = (double)n_cells_y * cell_dim_y;
// Dimension of each partition in the row, column, time directions
double y_dim = global_y_dim / (double)n_y_partition;
double x_dim = global_x_dim / (double)n_x_partition;
double time_dim = (double)max_num_timesteps / (double)n_time_partition;
/*
* For the given partition, the (time, row, column) coordinate of this
* partition in a 2D space.
*/
unsigned partition_time = partition / (n_y_partition * n_x_partition);
unsigned partition_y = (partition / n_x_partition) % n_y_partition;
unsigned partition_x = partition % n_x_partition;
// Get bounding box of partition in the grid coordinate system
double min_y = (double)partition_y * y_dim;
double max_y = min_y + y_dim;
double min_x = (double)partition_x * x_dim;
double max_x = min_x + x_dim;
double min_time = (double)partition_time * time_dim;
double max_time = min_time + time_dim;
/*
* Expand partition bounding box to include any possible points within
* infection_radius distance.
*/
min_time -= 1; // Only interact with previous and next timesteps
max_time += 1;
min_y -= infection_radius;
max_y += infection_radius;
min_x -= infection_radius;
max_x += infection_radius;
int min_partition_y, min_partition_x, max_partition_y,
max_partition_x, min_partition_time, max_partition_time;
if (min_y < 0.0) min_partition_y = 0;
else min_partition_y = (int)(min_y / y_dim);
if (min_x < 0.0) min_partition_x = 0;
else min_partition_x = (int)(min_x / x_dim);
if (min_time < 0.0) min_partition_time = 0;
else min_partition_time = (int)(min_time / time_dim);
if (max_y >= (double)global_y_dim) max_partition_y = n_y_partition - 1;
else max_partition_y = (int)(max_y / y_dim);
if (max_x >= (double)global_x_dim) max_partition_x = n_x_partition - 1;
else max_partition_x = (int)(max_x / x_dim);
if (max_time >= (double)max_num_timesteps) max_partition_time =
n_time_partition - 1;
else max_partition_time = (int)(max_time / time_dim);
assert(min_partition_y <= max_partition_y);
assert(min_partition_x <= max_partition_x);
assert(min_partition_time <= max_partition_time);
unsigned count_interacting_partitions = 0;
for (unsigned t = min_partition_time; t <= max_partition_time; t++) {
for (unsigned r = min_partition_y; r <= max_partition_y; r++) {
for (unsigned c = min_partition_x; c <= max_partition_x; c++) {
const unsigned part = t * n_y_partition * n_x_partition +
r * n_x_partition + c;
if (count_interacting_partitions >= interacting_partitions_capacity) {
fprintf(stderr, "time = (%d, %d) y = (%d, %d) x = (%d, %d) "
"current count = %u, capacity = %u\n",
min_partition_time, max_partition_time,
min_partition_y, max_partition_y,
min_partition_x, max_partition_x,
count_interacting_partitions,
interacting_partitions_capacity);
abort();
}
interacting_partitions[count_interacting_partitions++] = part;
}
}
}
*n_interacting_partitions = count_interacting_partitions;
}
/*
 * Callback used by the HOOVER runtime to compute this PE's coupled metric:
 * the count of locally owned actors that are infected at the final modeled
 * timestep, plus the local actor count. should_terminate below uses these
 * values to decide when this PE may leave the simulation.
 */
void update_coupled_val(hvr_vertex_iter_t *iter, hvr_ctx_t ctx,
hvr_vertex_t *out_coupled_metric) {
// Abort if all of my member vertices are infected
size_t nset = 0;
hvr_vertex_t *vert = hvr_vertex_iter_next(iter);
while (vert) {
if ((int)hvr_vertex_get(TIME_STEP, vert, ctx) == max_num_timesteps - 1) {
if (hvr_vertex_get(INFECTED, vert, ctx) > 0.0) {
nset++;
}
}
vert = hvr_vertex_iter_next(iter);
}
hvr_vertex_set(0, (double)nset, out_coupled_metric, ctx);
hvr_vertex_set(1, (double)n_local_actors, out_coupled_metric, ctx);
}
int should_terminate(hvr_vertex_iter_t *iter, hvr_ctx_t ctx,
hvr_vertex_t *local_coupled_metric, // coupled_pes[shmem_my_pe()]
hvr_vertex_t *all_coupled_metrics, // Each PE's val
hvr_vertex_t *global_coupled_metric, // Sum reduction of coupled_pes
hvr_set_t *coupled_pes, // An array of size npes, with each PE's val
int n_coupled_pes,
int *updates_on_this_iter, // An array of size npes, the number of vertex updates done on each coupled PE
hvr_set_t *terminated_coupled_pes) {
int sum_updates = 0;
for (int i = 0; i < ctx->npes; i++) {
sum_updates += updates_on_this_iter[i];
}
unsigned local_nset = (unsigned)hvr_vertex_get(0, local_coupled_metric,
ctx);
unsigned global_nset = (unsigned)hvr_vertex_get(0,
global_coupled_metric, ctx);
unsigned global_nverts = (unsigned)hvr_vertex_get(1,
global_coupled_metric, ctx);
if (local_nset > 0) {
printf("PE %d - iter %lu - local set %u / %lu (%.2f%%)- # coupled = %d "
"- global set %u / %u (%.2f%%) - %u vertex updates globally\n",
pe,
(uint64_t)ctx->iter,
local_nset, n_local_actors,
100.0 * (double)local_nset / (double)n_local_actors,
n_coupled_pes,
global_nset,
global_nverts,
100.0 * (double)global_nset / (double)global_nverts,
sum_updates);
}
int aborting = 0;
if (n_coupled_pes == ctx->npes) {
if (sum_updates == 0) {
int ninfected = (int)hvr_vertex_get(0, local_coupled_metric, ctx);
double percent_infected = (double)ninfected /
(double)n_local_actors;
double global_percent_infected = 100.0 * (double)global_nset /
(double)global_nverts;
printf("PE %d leaving the simulation, %% local infected = %f "
"(%d / %lu), %% global infected = %f (%u / %u)\n",
shmem_my_pe(),
100.0 * percent_infected,
ninfected, n_local_actors,
global_percent_infected, global_nset, global_nverts);
aborting = 1;
}
}
return aborting;
}
static int safe_fread(double *buf, size_t n_to_read, FILE *fp) {
size_t err = fread(buf, sizeof(*buf), n_to_read, fp);
if (err == n_to_read) return 1;
else {
assert(feof(fp));
return 0;
}
}
int main(int argc, char **argv) {
hvr_ctx_t hvr_ctx;
if (argc != 11) {
fprintf(stderr, "usage: %s <cell-dim-y> <cell-dim-x> "
"<n-cells-y> <n-cells-x> "
"<max-num-timesteps> <infection-radius> "
"<time-limit> <input-file> <y-partitions> <x-partitions>\n",
argv[0]);
return 1;
}
cell_dim_y = atof(argv[1]);
cell_dim_x = atof(argv[2]);
n_cells_y = atoi(argv[3]);
n_cells_x = atoi(argv[4]);
max_num_timesteps = atoi(argv[5]);
infection_radius = atof(argv[6]);
int time_limit = atoi(argv[7]);
char *input_filename = argv[8];
n_time_partition = max_num_timesteps;
n_y_partition = atoi(argv[9]);
n_x_partition = atoi(argv[10]);
hvr_partition_t npartitions = n_time_partition * n_y_partition *
n_x_partition;
#ifdef MULTITHREADED
#pragma omp parallel
#pragma omp single
nthreads = omp_get_num_threads();
#endif
const double global_x_dim = (double)n_cells_x * cell_dim_x;
const double global_y_dim = (double)n_cells_y * cell_dim_y;
for (int i = 0; i < SHMEM_REDUCE_SYNC_SIZE; i++) {
p_sync[i] = SHMEM_SYNC_VALUE;
}
#ifdef MULTITHREADED
int provided = shmemx_init_thread(SHMEM_THREAD_MULTIPLE);
assert(provided == SHMEM_THREAD_MULTIPLE);
#else
shmem_init();
#endif
pe = shmem_my_pe();
npes = shmem_n_pes();
if (pe == 0) {
fprintf(stderr, "Running with %d PEs\n", npes);
#ifdef MULTITHREADED
fprintf(stderr, "Running with %d OMP threads\n", nthreads);
#endif
}
assert(npes == n_cells_y * n_cells_x);
hvr_ctx_create(&hvr_ctx);
const double my_cell_start_x = PE_COL_CELL_START(pe);
const double my_cell_end_x = my_cell_start_x + cell_dim_x;
const double my_cell_start_y = PE_ROW_CELL_START(pe);
const double my_cell_end_y = my_cell_start_y + cell_dim_y;
// unsigned long long start_count_local = hvr_current_time_us();
FILE *input = fopen(input_filename, "rb");
assert(input);
/*
* 0: actor id
* 1: px
* 2: py
* 3: dst_x
* 4: dst_y
* 5: infected
*/
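/*
 * Sketch of the producer side (illustrative only, not part of this test):
 * each record is six consecutive doubles, so an input file could be
 * written with
 *     double rec[6] = {actor_id, px, py, dst_x, dst_y, infected};
 *     fwrite(rec, sizeof(double), 6, out);
 */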
double buf[6];
while (safe_fread(buf, 6, input)) {
if (buf[1] >= my_cell_start_x && buf[1] < my_cell_end_x &&
buf[2] >= my_cell_start_y && buf[2] < my_cell_end_y) {
n_local_actors++;
}
}
fclose(input);
// unsigned long long elapsed_count_local = hvr_current_time_us() -
// start_count_local;
// fprintf(stderr, "PE %d took %f ms to count local, %lu local actors\n", pe,
// (double)elapsed_count_local / 1000.0, n_local_actors);
// unsigned long long start_pop_local = hvr_current_time_us();
size_t index = 0;
input = fopen(input_filename, "rb");
while (safe_fread(buf, 6, input)) {
if (buf[1] >= my_cell_start_x && buf[1] < my_cell_end_x &&
buf[2] >= my_cell_start_y && buf[2] < my_cell_end_y) {
hvr_vertex_t *actor = hvr_vertex_create(hvr_ctx);
double actor_id = buf[0];
double x = buf[1];
double y = buf[2];
double dst_x = buf[3];
double dst_y = buf[4];
double infected = buf[5];
if (infected > 0.0) {
fprintf(stderr, "PE %d - actor %lu infected\n", pe,
(unsigned long)actor_id);
}
hvr_vertex_set(TIME_STEP, 0, actor, hvr_ctx);
hvr_vertex_set(ACTOR_ID, actor_id, actor, hvr_ctx);
hvr_vertex_set(PX, x, actor, hvr_ctx);
hvr_vertex_set(PY, y, actor, hvr_ctx);
hvr_vertex_set(INFECTED, infected, actor, hvr_ctx);
hvr_vertex_set(DST_X, dst_x, actor, hvr_ctx);
hvr_vertex_set(DST_Y, dst_y, actor, hvr_ctx);
hvr_vertex_set(NEXT_CREATED, 0, actor, hvr_ctx);
hvr_vertex_set(NEXT_ID, 0, actor, hvr_ctx);
hvr_vertex_set(PREV_IS_INFECTED, infected, actor, hvr_ctx);
hvr_vertex_set(PREV_PX, x, actor, hvr_ctx);
hvr_vertex_set(PREV_PY, y, actor, hvr_ctx);
index++;
}
}
assert(index == n_local_actors);
fclose(input);
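    /*
     * Exchange per-PE actor counts: each PE writes its local count into its
     * own slot of the symmetric actors_per_pe array on every PE, and after
     * the barrier each PE sums the array to get the global actor count.
     */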
size_t *actors_per_pe = (size_t *)shmem_malloc(npes * sizeof(*actors_per_pe));
assert(actors_per_pe);
for (int p = 0; p < npes; p++) {
shmem_putmem(actors_per_pe + pe, &n_local_actors,
sizeof(n_local_actors), p);
}
shmem_barrier_all();
for (int p = 0; p < npes; p++) {
total_n_actors += actors_per_pe[p];
}
// unsigned long long elapsed_pop_local = hvr_current_time_us() -
// start_pop_local;
// fprintf(stderr, "PE %d took %f ms to populate local\n", pe,
// (double)elapsed_pop_local / 1000.0);
if (pe == 0) {
fprintf(stderr, "Running for at most %d seconds\n", time_limit);
fprintf(stderr, "Using %u partitions (%u time partitions * %u y "
"partitions * %u x partitions)\n", npartitions,
n_time_partition, n_y_partition, n_x_partition);
fprintf(stderr, "Loading input from %s\n", input_filename);
fprintf(stderr, "~%lu actors per PE x %d PEs x %u timesteps = %lu "
"vertices across all PEs (~%f vertices per PE)\n",
n_local_actors,
npes,
max_num_timesteps,
total_n_actors * max_num_timesteps,
(double)(total_n_actors * max_num_timesteps) / (double)npes);
fprintf(stderr, "%d timesteps, y=%f x x=%f grid\n", max_num_timesteps,
global_y_dim, global_x_dim);
}
shmem_barrier_all();
hvr_init(npartitions,
update_vertex,
might_interact,
update_coupled_val,
actor_to_partition,
NULL, // start_time_step
should_have_edge,
should_terminate,
time_limit, // max_elapsed_seconds
1, // max_graph_traverse_depth
hvr_ctx);
const long long start_time = hvr_current_time_us();
hvr_body(hvr_ctx);
const long long elapsed_time = hvr_current_time_us() - start_time;
long long *elapsed_times = (long long *)shmem_malloc(
npes * sizeof(*elapsed_times));
assert(elapsed_times);
shmem_longlong_put(elapsed_times + pe, &elapsed_time, 1, 0);
shmem_barrier_all();
long long total_time = 0;
for (int p = 0; p < npes; p++) {
total_time += elapsed_times[p];
}
long long max_elapsed = 0;
for (int p = 0; p < npes; p++) {
if (elapsed_times[p] > max_elapsed) {
max_elapsed = elapsed_times[p];
}
}
uint64_t *msgs_sent = (uint64_t *)shmem_malloc(
npes * sizeof(*msgs_sent));
assert(msgs_sent);
uint64_t *msgs_recv = (uint64_t *)shmem_malloc(
npes * sizeof(*msgs_recv));
assert(msgs_recv);
int *modeled_timesteps = (int *)shmem_malloc(
npes * sizeof(*modeled_timesteps));
assert(modeled_timesteps);
shmem_int_put(modeled_timesteps + pe, &max_modeled_timestep, 1, 0);
shmem_uint64_put(msgs_sent + pe, &(hvr_ctx->total_vertex_msgs_sent), 1, 0);
shmem_uint64_put(msgs_recv + pe, &(hvr_ctx->total_vertex_msgs_recvd), 1, 0);
shmem_barrier_all();
uint64_t total_msgs_sent = msgs_sent[0];
uint64_t total_msgs_recv = msgs_recv[0];
int all_max_modeled_timestep = modeled_timesteps[0];
for (int p = 1; p < npes; p++) {
if (modeled_timesteps[p] < all_max_modeled_timestep) {
all_max_modeled_timestep = modeled_timesteps[p];
}
total_msgs_sent += msgs_sent[p];
total_msgs_recv += msgs_recv[p];
}
if (pe == 0) {
printf("%d PEs, %d timesteps, infection radius of %f, total CPU time = "
"%f ms, max elapsed = %f ms, ~%lu actors per PE, completed %d "
"iters\n", npes, max_num_timesteps, infection_radius,
(double)total_time / 1000.0, (double)max_elapsed / 1000.0,
n_local_actors, hvr_ctx->iter);
printf("In total %lu msgs sent, %lu msgs received\n", total_msgs_sent,
total_msgs_recv);
printf("Max modeled timestep across all PEs = %d, # vertices on PE 0 = "
"%lu\n", all_max_modeled_timestep, hvr_n_local_vertices(hvr_ctx));
for (int p = 0; p < npes; p++) {
printf(" PE %d got to timestep %d\n", p, modeled_timesteps[p]);
}
}
hvr_finalize(hvr_ctx);
shmem_finalize();
return 0;
}
|
channel_shuffle.h
|
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_CHANNEL_SHUFFLE_H_
#define MACE_KERNELS_CHANNEL_SHUFFLE_H_
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
namespace mace {
namespace kernels {
template<DeviceType D, typename T>
struct ChannelShuffleFunctor {
explicit ChannelShuffleFunctor(const int groups) : groups_(groups) {}
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(future);
MACE_RETURN_IF_ERROR(output->ResizeLike(input));
Tensor::MappingGuard logits_guard(input);
Tensor::MappingGuard output_guard(output);
const T *input_ptr = input->data<T>();
T *output_ptr = output->mutable_data<T>();
index_t batch = input->dim(0);
index_t channels = input->dim(1);
index_t height = input->dim(2);
index_t width = input->dim(3);
index_t image_size = height * width;
index_t batch_size = channels * image_size;
index_t channels_per_group = channels / groups_;
#pragma omp parallel for collapse(2)
for (index_t b = 0; b < batch; ++b) {
for (index_t c = 0; c < channels; ++c) {
const T *input_base = input_ptr + b * batch_size;
T *output_base = output_ptr + b * batch_size;
index_t g = c % groups_;
index_t idx = c / groups_;
for (index_t hw = 0; hw < height * width; ++hw) {
output_base[c * image_size + hw] = input_base[
(g * channels_per_group + idx) * image_size + hw];
}
}
}
return MACE_SUCCESS;
}
const int groups_;
};
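// Worked example of the shuffle mapping above: with groups_ = 2 and
// channels = 6 (channels_per_group = 3), output channels 0..5 read from input
// channels {0, 3, 1, 4, 2, 5}, i.e. the two groups are interleaved. The helper
// below is an illustrative sketch (not part of the MACE API) that reproduces
// the same index arithmetic.
inline index_t ChannelShuffleSourceChannel(const index_t c,
                                           const index_t groups,
                                           const index_t channels_per_group) {
  const index_t g = c % groups;         // group the output channel samples from
  const index_t idx = c / groups;       // offset inside that group
  return g * channels_per_group + idx;  // corresponding input channel index
}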
#ifdef MACE_ENABLE_OPENCL
template<typename T>
struct ChannelShuffleFunctor<DeviceType::GPU, T> {
explicit ChannelShuffleFunctor(const int groups) : groups_(groups) {}
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future);
cl::Kernel kernel_;
uint32_t kwg_size_;
std::unique_ptr<BufferBase> kernel_error_;
const int groups_;
std::vector<index_t> input_shape_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_CHANNEL_SHUFFLE_H_
|
compressible_element_rotation_utility.h
|
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#ifndef KRATOS_COMPRESSIBLE_ELEMENT_ROTATION_UTILITY
#define KRATOS_COMPRESSIBLE_ELEMENT_ROTATION_UTILITY
// system includes
// external includes
// kratos includes
#include "includes/define.h"
#include "includes/node.h"
#include "containers/variable.h"
#include "geometries/geometry.h"
#include "utilities/coordinate_transformation_utilities.h"
namespace Kratos {
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// A utility to rotate the local contributions of certain nodes to the system matrix, which is required to apply slip conditions in arbitrary directions.
template<class TLocalMatrixType, class TLocalVectorType>
class CompressibleElementRotationUtility: public CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double> {
public:
///@name Type Definitions
///@{
/// Pointer definition of CompressibleElementRotationUtility
KRATOS_CLASS_POINTER_DEFINITION(CompressibleElementRotationUtility);
typedef Node<3> NodeType;
typedef Geometry< Node<3> > GeometryType;
///@}
///@name Life Cycle
///@{
/// Constructor.
    /** @param DomainSize Number of space dimensions (2 or 3). Each nodal block is assumed to contain DomainSize+2 rows, ordered as density-momentum-energy.
     * @param rFlag Flag used to mark nodes where local system contributions will be rotated. All nodes with rFlag == true will be rotated.
     */
CompressibleElementRotationUtility(
const unsigned int DomainSize,
const Kratos::Flags& rFlag = SLIP):
CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double>(DomainSize,DomainSize+2,rFlag)
{}
/// Destructor.
~CompressibleElementRotationUtility() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Rotate the local system contributions so that they are oriented with each node's normal.
/**
@param rLocalMatrix Local system matrix
@param rLocalVector Local RHS vector
@param rGeometry A reference to the element's (or condition's) geometry
*/
void Rotate(
TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
if (this->GetDomainSize() == 2) this->template RotateAux<2,4,1>(rLocalMatrix,rLocalVector,rGeometry);
else if (this->GetDomainSize() == 3) this->template RotateAux<3,5,1>(rLocalMatrix,rLocalVector,rGeometry);
}
/// RHS only version of Rotate
void Rotate(
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
TLocalMatrixType dummy = ZeroMatrix(rLocalVector.size(),rLocalVector.size());
if (this->GetDomainSize() == 2) this->template RotateAux<2,4,1>(dummy,rLocalVector,rGeometry);
else if (this->GetDomainSize() == 3) this->template RotateAux<3,5,1>(dummy,rLocalVector,rGeometry);
}
/// Apply slip boundary conditions to the rotated local contributions.
/** This function takes the local system contributions rotated so each
node's velocities are expressed using a base oriented with its normal
and imposes that the normal velocity is equal to the mesh velocity in
the normal direction.
*/
void ApplySlipCondition(TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes)
if (LocalSize > 0)
{
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if(this->IsSlip(rGeometry[itNode]) )
{
// We fix the first momentum dof (normal component) for each rotated block
unsigned int j = itNode * this->GetBlockSize() + 1; // +1 assumes DOF ordering as density-momentum-energy
for( unsigned int i = 0; i < j; ++i)// Skip term (i,i)
{
rLocalMatrix(i,j) = 0.0;
rLocalMatrix(j,i) = 0.0;
}
for( unsigned int i = j+1; i < LocalSize; ++i)
{
rLocalMatrix(i,j) = 0.0;
rLocalMatrix(j,i) = 0.0;
}
rLocalVector(j) = 0.0;
rLocalMatrix(j,j) = 1.0;
}
}
}
}
/// RHS only version of ApplySlipCondition
void ApplySlipCondition(TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
if (rLocalVector.size() > 0)
{
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if( this->IsSlip(rGeometry[itNode]) )
{
// We fix the first momentum dof (normal component) for each rotated block
unsigned int j = itNode * this->GetBlockSize() + 1; // +1 assumes DOF ordering as density-momentum-energy
rLocalVector[j] = 0.0;
}
}
}
}
    /// Transform the nodal MOMENTUM values to the rotated coordinates (aligned with each node's normal)
void RotateVelocities(ModelPart& rModelPart) const override
{
TLocalVectorType momentum(this->GetDomainSize());
TLocalVectorType Tmp(this->GetDomainSize());
ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
#pragma omp parallel for firstprivate(momentum,Tmp)
for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
{
ModelPart::NodeIterator itNode = it_begin+iii;
if( this->IsSlip(*itNode) )
{
//this->RotationOperator<TLocalMatrixType>(Rotation,);
if(this->GetDomainSize() == 3)
{
BoundedMatrix<double,3,3> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rMomentum = itNode->FastGetSolutionStepValue(MOMENTUM);
for(unsigned int i = 0; i < 3; i++) momentum[i] = rMomentum[i];
noalias(Tmp) = prod(rRot,momentum);
for(unsigned int i = 0; i < 3; i++) rMomentum[i] = Tmp[i];
}
else
{
BoundedMatrix<double,2,2> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rMomentum = itNode->FastGetSolutionStepValue(MOMENTUM);
for(unsigned int i = 0; i < 2; i++) momentum[i] = rMomentum[i];
noalias(Tmp) = prod(rRot,momentum);
for(unsigned int i = 0; i < 2; i++) rMomentum[i] = Tmp[i];
}
}
}
}
    /// Transform the nodal MOMENTUM values from the rotated system back to the original one
void RecoverVelocities(ModelPart& rModelPart) const override
{
TLocalVectorType momentum(this->GetDomainSize());
TLocalVectorType Tmp(this->GetDomainSize());
ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
#pragma omp parallel for firstprivate(momentum,Tmp)
for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
{
ModelPart::NodeIterator itNode = it_begin+iii;
if( this->IsSlip(*itNode) )
{
if(this->GetDomainSize() == 3)
{
BoundedMatrix<double,3,3> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rMomentum = itNode->FastGetSolutionStepValue(MOMENTUM);
for(unsigned int i = 0; i < 3; i++) momentum[i] = rMomentum[i];
noalias(Tmp) = prod(trans(rRot),momentum);
for(unsigned int i = 0; i < 3; i++) rMomentum[i] = Tmp[i];
}
else
{
BoundedMatrix<double,2,2> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rMomentum = itNode->FastGetSolutionStepValue(MOMENTUM);
for(unsigned int i = 0; i < 2; i++) momentum[i] = rMomentum[i];
noalias(Tmp) = prod(trans(rRot),momentum);
for(unsigned int i = 0; i < 2; i++) rMomentum[i] = Tmp[i];
}
}
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "CompressibleElementRotationUtility";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "CompressibleElementRotationUtility";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
CompressibleElementRotationUtility& operator=(CompressibleElementRotationUtility const& rOther) {}
/// Copy constructor.
CompressibleElementRotationUtility(CompressibleElementRotationUtility const& rOther) {}
///@}
};
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TLocalMatrixType, class TLocalVectorType>
inline std::istream& operator >>(std::istream& rIStream,
CompressibleElementRotationUtility<TLocalMatrixType, TLocalVectorType>& rThis) {
return rIStream;
}
/// output stream function
template<class TLocalMatrixType, class TLocalVectorType>
inline std::ostream& operator <<(std::ostream& rOStream,
const CompressibleElementRotationUtility<TLocalMatrixType, TLocalVectorType>& rThis) {
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}
#endif // KRATOS_COMPRESSIBLE_ELEMENT_ROTATION_UTILITY
|
cmontecarlo.c
|
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef WITHOPENMP
#include <omp.h>
#endif
#include "io.h"
#include "abbrev.h"
#include "status.h"
#include "rpacket.h"
#include "cmontecarlo.h"
/** Look for a place to insert a value in an inversely sorted array of doubles.
 *
 * @param x an inversely (largest to smallest) sorted array of doubles
* @param x_insert a value to insert
* @param imin lower bound
* @param imax upper bound
*
* @return index of the next boundary to the left
*/
tardis_error_t
reverse_binary_search (const double *x, double x_insert,
int64_t imin, int64_t imax, int64_t * result)
{
/*
Have in mind that *x points to a reverse sorted array.
That is large values will have small indices and small ones
will have large indices.
*/
tardis_error_t ret_val = TARDIS_ERROR_OK;
if (x_insert > x[imin] || x_insert < x[imax])
{
ret_val = TARDIS_ERROR_BOUNDS_ERROR;
}
else
{
      int64_t imid = (imin + imax) >> 1;
while (imax - imin > 2)
{
if (x[imid] < x_insert)
{
imax = imid + 1;
}
else
{
imin = imid;
}
imid = (imin + imax) >> 1;
}
if (imax - imin == 2 && x_insert < x[imin + 1])
{
*result = imin + 1;
}
else
{
*result = imin;
}
}
return ret_val;
}
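/* Minimal usage sketch (illustrative only; TARDIS_BINSEARCH_EXAMPLE is a
 * hypothetical guard, not an existing build flag). For the reverse-sorted
 * array {9, 7, 5, 3, 1}, inserting 6 yields index 1, i.e. x[1] = 7 is the
 * next boundary to the left of the inserted value.
 */
#ifdef TARDIS_BINSEARCH_EXAMPLE
static void
example_reverse_binary_search (void)
{
  const double edges[5] = { 9.0, 7.0, 5.0, 3.0, 1.0 };
  int64_t idx = -1;
  tardis_error_t err = reverse_binary_search (edges, 6.0, 0, 4, &idx);
  // Expect err == TARDIS_ERROR_OK and idx == 1.
  fprintf (stderr, "err = %d, idx = %" PRIi64 "\n", (int) err, idx);
}
#endif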
/** Insert a value in to an array of line frequencies
*
* @param nu array of line frequencies
* @param nu_insert value of nu key
* @param number_of_lines number of lines in the line list
*
 * @return index of the next line to the red. If the key value is redder than the reddest line, returns number_of_lines.
*/
tardis_error_t
line_search (const double *nu, double nu_insert, int64_t number_of_lines,
int64_t * result)
{
tardis_error_t ret_val = TARDIS_ERROR_OK;
int64_t imin = 0;
int64_t imax = number_of_lines - 1;
if (nu_insert > nu[imin])
{
*result = imin;
}
else if (nu_insert < nu[imax])
{
*result = imax + 1;
}
else
{
ret_val = reverse_binary_search (nu, nu_insert, imin, imax, result);
*result = *result + 1;
}
return ret_val;
}
tardis_error_t
binary_search (const double *x, double x_insert, int64_t imin,
int64_t imax, int64_t * result)
{
/*
Have in mind that *x points to a sorted array.
Like [1,2,3,4,5,...]
*/
  int64_t imid;
tardis_error_t ret_val = TARDIS_ERROR_OK;
if (x_insert < x[imin] || x_insert > x[imax])
{
ret_val = TARDIS_ERROR_BOUNDS_ERROR;
}
else
{
while (imax >= imin)
{
imid = (imin + imax) / 2;
if (x[imid] == x_insert)
{
*result = imid;
break;
}
else if (x[imid] < x_insert)
{
imin = imid + 1;
}
else
{
imax = imid - 1;
}
}
      if (imax < imin)
        {
          // No exact match was found: imin now indexes the first element
          // larger than x_insert.
          *result = imin;
        }
}
return ret_val;
}
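/* Doppler factor for the homologously expanding ejecta: with v = r / t_explosion,
   the first-order factor is D = 1 - mu * v / c, i.e. exactly
   1 - mu * r * inverse_time_explosion * INVERSE_C as computed below. */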
double
rpacket_doppler_factor (const rpacket_t *packet, const storage_model_t *storage)
{
return 1.0 -
rpacket_get_mu (packet) * rpacket_get_r (packet) *
storage->inverse_time_explosion * INVERSE_C;
}
double
bf_cross_section (const storage_model_t * storage, int64_t continuum_id, double comov_nu)
{
double bf_xsect;
double *x_sect = storage->photo_xsect[continuum_id]->x_sect;
double *nu = storage->photo_xsect[continuum_id]->nu;
switch (storage->bf_treatment)
{
case LIN_INTERPOLATION:
{
int64_t result;
tardis_error_t error = binary_search (nu, comov_nu, 0,
storage->photo_xsect[continuum_id]->no_of_points - 1, &result);
if (error == TARDIS_ERROR_BOUNDS_ERROR)
{
bf_xsect = 0.0;
}
else
{
bf_xsect = x_sect[result-1] + (comov_nu - nu[result-1]) / (nu[result] - nu[result-1])
* (x_sect[result] - x_sect[result-1]);
}
break;
}
case HYDROGENIC:
{
double nu_ratio = nu[0] / comov_nu;
bf_xsect = x_sect[0] * nu_ratio * nu_ratio * nu_ratio;
break;
}
default:
fprintf (stderr, "(%d) is not a valid bound-free cross section treatment.\n", storage->bf_treatment);
exit(1);
}
return bf_xsect;
}
void calculate_chi_bf (rpacket_t * packet, storage_model_t * storage)
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
int64_t no_of_continuum_edges = storage->no_of_edges;
int64_t current_continuum_id;
  line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id);
rpacket_set_current_continuum_id (packet, current_continuum_id);
int64_t shell_id = rpacket_get_current_shell_id (packet);
double T = storage->t_electrons[shell_id];
double boltzmann_factor = exp (-(H * comov_nu) / (KB * T));
double bf_helper = 0;
for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++)
{
// get the level population for the level ijk in the current shell:
double l_pop = storage->l_pop[shell_id * no_of_continuum_edges + i];
// get the level population ratio \frac{n_{0,j+1,k}}{n_{i,j,k}} \frac{n_{i,j,k}}{n_{0,j+1,k}}^{*}:
double l_pop_r = storage->l_pop_r[shell_id * no_of_continuum_edges + i];
double bf_x_sect = bf_cross_section (storage, i, comov_nu);
if (bf_x_sect == 0.0)
{
break;
}
bf_helper += l_pop * bf_x_sect * (1.0 - l_pop_r * boltzmann_factor) * doppler_factor;
packet->chi_bf_tmp_partial[i] = bf_helper;
}
rpacket_set_chi_boundfree(packet, bf_helper);
}
void calculate_chi_ff (rpacket_t * packet, const storage_model_t * storage)
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
int64_t shell_id = rpacket_get_current_shell_id (packet);
double T = storage->t_electrons[shell_id];
double boltzmann_factor = exp (-(H * comov_nu) / KB / T);
double chi_ff_factor = storage->chi_ff_factor[shell_id];
double chi_ff = chi_ff_factor * (1 - boltzmann_factor) * pow (comov_nu, -3);
rpacket_set_chi_freefree (packet, chi_ff * doppler_factor);
}
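/* Geometry of the boundary distance: from radius r along direction cosine mu,
   the distance d to a shell of radius R solves r^2 + d^2 + 2*r*d*mu = R^2,
   giving d = sqrt(R^2 + (mu^2 - 1)*r^2) - r*mu for the outer shell and
   d = -r*mu - sqrt(r_inner^2 + (mu^2 - 1)*r^2) when an inward-moving packet
   actually reaches the inner shell (non-negative discriminant). */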
void
compute_distance2boundary (rpacket_t * packet, const storage_model_t * storage)
{
double r = rpacket_get_r (packet);
double mu = rpacket_get_mu (packet);
double r_outer = storage->r_outer[rpacket_get_current_shell_id (packet)];
double r_inner = storage->r_inner[rpacket_get_current_shell_id (packet)];
double check, distance;
if (mu > 0.0)
{ // direction outward
rpacket_set_next_shell_id (packet, 1);
distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
}
else
{ // going inward
if ( (check = r_inner * r_inner + (r * r * (mu * mu - 1.0)) )>= 0.0)
{ // hit inner boundary
rpacket_set_next_shell_id (packet, -1);
distance = - r * mu - sqrt (check);
}
else
{ // miss inner boundary
rpacket_set_next_shell_id (packet, 1);
distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
}
}
rpacket_set_d_boundary (packet, distance);
}
tardis_error_t
compute_distance2line (rpacket_t * packet, const storage_model_t * storage)
{
if (!rpacket_get_last_line (packet))
{
double r = rpacket_get_r (packet);
double mu = rpacket_get_mu (packet);
double nu = rpacket_get_nu (packet);
double nu_line = rpacket_get_nu_line (packet);
double distance, nu_diff;
double t_exp = storage->time_explosion;
double inverse_t_exp = storage->inverse_time_explosion;
int64_t cur_zone_id = rpacket_get_current_shell_id (packet);
double doppler_factor = 1.0 - mu * r * inverse_t_exp * INVERSE_C;
double comov_nu = nu * doppler_factor;
if ( (nu_diff = comov_nu - nu_line) >= 0)
{
distance = (nu_diff / nu) * C * t_exp;
rpacket_set_d_line (packet, distance);
return TARDIS_ERROR_OK;
}
else
{
if (rpacket_get_next_line_id (packet) == storage->no_of_lines - 1)
{
fprintf (stderr, "last_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) - 1]);
fprintf (stderr, "Last line in line list reached!");
}
else if (rpacket_get_next_line_id (packet) == 0)
{
fprintf (stderr, "First line in line list!");
fprintf (stderr, "next_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) + 1]);
}
else
{
fprintf (stderr, "last_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) - 1]);
fprintf (stderr, "next_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) + 1]);
}
fprintf (stderr, "ERROR: Comoving nu less than nu_line!\n");
fprintf (stderr, "comov_nu = %f\n", comov_nu);
fprintf (stderr, "nu_line = %f\n", nu_line);
fprintf (stderr, "(comov_nu - nu_line) / nu_line = %f\n",
(comov_nu - nu_line) / nu_line);
fprintf (stderr, "r = %f\n", r);
fprintf (stderr, "mu = %f\n", mu);
fprintf (stderr, "nu = %f\n", nu);
fprintf (stderr, "doppler_factor = %f\n", doppler_factor);
fprintf (stderr, "cur_zone_id = %" PRIi64 "\n", cur_zone_id);
return TARDIS_ERROR_COMOV_NU_LESS_THAN_NU_LINE;
}
}
else
{
rpacket_set_d_line (packet, MISS_DISTANCE);
return TARDIS_ERROR_OK;
}
}
void
compute_distance2continuum (rpacket_t * packet, storage_model_t * storage)
{
double chi_continuum, d_continuum;
double chi_electron = storage->electron_densities[rpacket_get_current_shell_id(packet)] * storage->sigma_thomson;
if (storage->cont_status == CONTINUUM_ON)
{
if (packet->compute_chi_bf)
{
calculate_chi_bf (packet, storage);
calculate_chi_ff (packet, storage);
}
else
{
packet->compute_chi_bf=true;
}
chi_electron *= rpacket_doppler_factor (packet, storage);
chi_continuum = rpacket_get_chi_boundfree (packet) + rpacket_get_chi_freefree (packet) + chi_electron;
d_continuum = rpacket_get_tau_event (packet) / chi_continuum;
}
else
{
chi_continuum = chi_electron;
d_continuum = storage->inverse_electron_densities[rpacket_get_current_shell_id (packet)] *
storage->inverse_sigma_thomson * rpacket_get_tau_event (packet);
}
if (rpacket_get_virtual_packet(packet) > 0)
{
      //Set all continuum distances to MISS_DISTANCE in case of a virtual packet
d_continuum = MISS_DISTANCE;
packet->compute_chi_bf = false;
}
else
{
// fprintf(stderr, "--------\n");
// fprintf(stderr, "nu = %e \n", rpacket_get_nu(packet));
// fprintf(stderr, "chi_electron = %e\n", chi_electron);
// fprintf(stderr, "chi_boundfree = %e\n", calculate_chi_bf(packet, storage));
// fprintf(stderr, "chi_line = %e \n", rpacket_get_tau_event(packet) / rpacket_get_d_line(packet));
// fprintf(stderr, "--------\n");
//rpacket_set_chi_freefree(packet, chi_freefree);
rpacket_set_chi_electron (packet, chi_electron);
}
rpacket_set_chi_continuum (packet, chi_continuum);
rpacket_set_d_continuum (packet, d_continuum);
}
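/* The macro-atom performs a random walk over internal transitions: for the
   currently activated level, a uniform draw is compared against the running
   sum of the stored transition probabilities of the current shell to select
   one transition, and the walk continues until the selected transition
   deactivates the macro-atom via one of the emission/cooling channels
   handled in the switch below. */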
void
macro_atom (rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
int emit = 0, i = 0, offset = -1;
uint64_t activate_level = rpacket_get_macro_atom_activation_level (packet);
while (emit >= 0)
{
double event_random = rk_double (mt_state);
i = storage->macro_block_references[activate_level] - 1;
double p = 0.0;
offset = storage->transition_probabilities_nd *
rpacket_get_current_shell_id (packet);
do
{
++i;
p += storage->transition_probabilities[offset + i];
}
while (p <= event_random);
emit = storage->transition_type[i];
activate_level = storage->destination_level_id[i];
}
switch (emit)
{
case BB_EMISSION:
line_emission (packet, storage, storage->transition_line_id[i], mt_state);
break;
case BF_EMISSION:
rpacket_set_current_continuum_id (packet, storage->transition_line_id[i]);
storage->last_line_interaction_out_id[rpacket_get_id (packet)] =
rpacket_get_current_continuum_id (packet);
continuum_emission (packet, storage, mt_state, sample_nu_free_bound, 3);
break;
case FF_EMISSION:
continuum_emission (packet, storage, mt_state, sample_nu_free_free, 4);
break;
case ADIABATIC_COOLING:
storage->last_interaction_type[rpacket_get_id (packet)] = 5;
rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
break;
default:
fprintf (stderr, "This process for macro-atom deactivation should not exist! (emit = %d)\n", emit);
exit(1);
}
}
void
move_packet (rpacket_t * packet, storage_model_t * storage, double distance)
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
if (distance > 0.0)
{
double r = rpacket_get_r (packet);
double new_r =
sqrt (r * r + distance * distance +
2.0 * r * distance * rpacket_get_mu (packet));
rpacket_set_mu (packet,
(rpacket_get_mu (packet) * r + distance) / new_r);
rpacket_set_r (packet, new_r);
if (rpacket_get_virtual_packet (packet) <= 0)
{
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->js[rpacket_get_current_shell_id (packet)] +=
comov_energy * distance;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->nubars[rpacket_get_current_shell_id (packet)] +=
comov_energy * distance * comov_nu;
if (storage->cont_status)
{
increment_continuum_estimators(packet, storage, distance, comov_nu, comov_energy);
}
}
}
}
void
increment_continuum_estimators (const rpacket_t * packet, storage_model_t * storage, double distance,
double comov_nu, double comov_energy)
{
int64_t current_continuum_id;
int64_t no_of_continuum_edges = storage->no_of_edges;
int64_t shell_id = rpacket_get_current_shell_id (packet);
  line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id);
double T = storage->t_electrons[shell_id];
double boltzmann_factor = exp (-(H * comov_nu) / (KB * T));
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->ff_heating_estimator[shell_id] += comov_energy * distance * rpacket_get_chi_freefree (packet);
for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++)
{
double bf_xsect = bf_cross_section (storage, i, comov_nu);
int64_t photo_ion_idx = i * storage->no_of_shells + shell_id;
double photo_ion_estimator_helper = comov_energy * distance * bf_xsect / comov_nu;
double bf_heating_estimator_helper =
comov_energy * distance * bf_xsect * (1. - storage->continuum_list_nu[i] / comov_nu);
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->photo_ion_estimator[photo_ion_idx] += photo_ion_estimator_helper;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->stim_recomb_estimator[photo_ion_idx] += photo_ion_estimator_helper * boltzmann_factor;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->bf_heating_estimator[photo_ion_idx] += bf_heating_estimator_helper;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->stim_recomb_cooling_estimator[photo_ion_idx] += bf_heating_estimator_helper * boltzmann_factor;
if (photo_ion_estimator_helper != 0.0)
{
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->photo_ion_estimator_statistics[photo_ion_idx] += 1;
}
else
{
break;
}
}
}
void
increment_j_blue_estimator (const rpacket_t * packet, storage_model_t * storage,
double d_line, int64_t j_blue_idx)
{
if (storage->line_lists_j_blues != NULL)
{
double r = rpacket_get_r (packet);
double r_interaction =
sqrt (r * r + d_line * d_line +
2.0 * r * d_line * rpacket_get_mu (packet));
double mu_interaction = (rpacket_get_mu (packet) * r + d_line) / r_interaction;
double doppler_factor = 1.0 - mu_interaction * r_interaction *
storage->inverse_time_explosion * INVERSE_C;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->line_lists_j_blues[j_blue_idx] +=
comov_energy / rpacket_get_nu (packet);
}
}
void
increment_Edotlu_estimator (const rpacket_t * packet, storage_model_t * storage, double d_line, int64_t line_idx)
{
if (storage->line_lists_Edotlu != NULL)
{
double r = rpacket_get_r (packet);
double r_interaction =
sqrt (r * r + d_line * d_line +
2.0 * r * d_line * rpacket_get_mu (packet));
double mu_interaction = (rpacket_get_mu (packet) * r + d_line) / r_interaction;
double doppler_factor = 1.0 - mu_interaction * r_interaction *
storage->inverse_time_explosion * INVERSE_C;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->line_lists_Edotlu[line_idx] += comov_energy; //rpacket_get_energy (packet);
}
}
int64_t
montecarlo_one_packet (storage_model_t * storage, rpacket_t * packet,
int64_t virtual_mode, rk_state *mt_state)
{
int64_t reabsorbed=-1;
if (virtual_mode == 0)
{
reabsorbed = montecarlo_one_packet_loop (storage, packet, 0, mt_state);
}
else
{
if ((rpacket_get_nu (packet) > storage->spectrum_virt_start_nu) && (rpacket_get_nu(packet) < storage->spectrum_virt_end_nu))
{
for (int64_t i = 0; i < rpacket_get_virtual_packet_flag (packet); i++)
{
double weight;
rpacket_t virt_packet = *packet;
double mu_min;
if (rpacket_get_r(&virt_packet) > storage->r_inner[0])
{
mu_min =
-1.0 * sqrt (1.0 -
(storage->r_inner[0] / rpacket_get_r(&virt_packet)) *
(storage->r_inner[0] / rpacket_get_r(&virt_packet)));
}
else
{
mu_min = 0.0;
}
double mu_bin = (1.0 - mu_min) / rpacket_get_virtual_packet_flag (packet);
rpacket_set_mu(&virt_packet,mu_min + (i + rk_double (mt_state)) * mu_bin);
switch (virtual_mode)
{
case -2:
weight = 1.0 / rpacket_get_virtual_packet_flag (packet);
break;
case -1:
weight =
2.0 * rpacket_get_mu(&virt_packet) /
rpacket_get_virtual_packet_flag (packet);
break;
case 1:
weight =
(1.0 -
mu_min) / 2.0 / rpacket_get_virtual_packet_flag (packet);
break;
default:
fprintf (stderr, "Something has gone horribly wrong!\n");
// FIXME MR: we need to somehow signal an error here
// I'm adding an exit() here to inform the compiler about the impossible path
exit(1);
}
double doppler_factor_ratio =
rpacket_doppler_factor (packet, storage) /
rpacket_doppler_factor (&virt_packet, storage);
rpacket_set_energy(&virt_packet,
rpacket_get_energy (packet) * doppler_factor_ratio);
rpacket_set_nu(&virt_packet,rpacket_get_nu (packet) * doppler_factor_ratio);
reabsorbed = montecarlo_one_packet_loop (storage, &virt_packet, 1, mt_state);
#ifdef WITH_VPACKET_LOGGING
#ifdef WITHOPENMP
#pragma omp critical
{
#endif // WITHOPENMP
if (storage->virt_packet_count >= storage->virt_array_size)
{
storage->virt_array_size *= 2;
storage->virt_packet_nus = safe_realloc(storage->virt_packet_nus, sizeof(double) * storage->virt_array_size);
storage->virt_packet_energies = safe_realloc(storage->virt_packet_energies, sizeof(double) * storage->virt_array_size);
storage->virt_packet_last_interaction_in_nu = safe_realloc(storage->virt_packet_last_interaction_in_nu, sizeof(double) * storage->virt_array_size);
storage->virt_packet_last_interaction_type = safe_realloc(storage->virt_packet_last_interaction_type, sizeof(int64_t) * storage->virt_array_size);
storage->virt_packet_last_line_interaction_in_id = safe_realloc(storage->virt_packet_last_line_interaction_in_id, sizeof(int64_t) * storage->virt_array_size);
storage->virt_packet_last_line_interaction_out_id = safe_realloc(storage->virt_packet_last_line_interaction_out_id, sizeof(int64_t) * storage->virt_array_size);
}
storage->virt_packet_nus[storage->virt_packet_count] = rpacket_get_nu(&virt_packet);
storage->virt_packet_energies[storage->virt_packet_count] = rpacket_get_energy(&virt_packet) * weight;
storage->virt_packet_last_interaction_in_nu[storage->virt_packet_count] = storage->last_interaction_in_nu[rpacket_get_id (packet)];
storage->virt_packet_last_interaction_type[storage->virt_packet_count] = storage->last_interaction_type[rpacket_get_id (packet)];
storage->virt_packet_last_line_interaction_in_id[storage->virt_packet_count] = storage->last_line_interaction_in_id[rpacket_get_id (packet)];
storage->virt_packet_last_line_interaction_out_id[storage->virt_packet_count] = storage->last_line_interaction_out_id[rpacket_get_id (packet)];
storage->virt_packet_count += 1;
#ifdef WITHOPENMP
}
#endif // WITHOPENMP
#endif // WITH_VPACKET_LOGGING
if ((rpacket_get_nu(&virt_packet) < storage->spectrum_end_nu) &&
(rpacket_get_nu(&virt_packet) > storage->spectrum_start_nu))
{
#ifdef WITHOPENMP
#pragma omp critical
{
#endif // WITHOPENMP
int64_t virt_id_nu =
floor ((rpacket_get_nu(&virt_packet) -
storage->spectrum_start_nu) /
storage->spectrum_delta_nu);
storage->spectrum_virt_nu[virt_id_nu] +=
rpacket_get_energy(&virt_packet) * weight;
#ifdef WITHOPENMP
}
#endif // WITHOPENMP
}
}
}
else
{
return 1;
}
}
return reabsorbed;
}
void
move_packet_across_shell_boundary (rpacket_t * packet,
storage_model_t * storage, double distance, rk_state *mt_state)
{
move_packet (packet, storage, distance);
if (rpacket_get_virtual_packet (packet) > 0)
{
double delta_tau_event = rpacket_get_chi_continuum(packet) * distance;
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) +
delta_tau_event);
packet->compute_chi_bf = true;
}
else
{
rpacket_reset_tau_event (packet, mt_state);
}
if ((rpacket_get_current_shell_id (packet) < storage->no_of_shells - 1
&& rpacket_get_next_shell_id (packet) == 1)
|| (rpacket_get_current_shell_id (packet) > 0
&& rpacket_get_next_shell_id (packet) == -1))
{
rpacket_set_current_shell_id (packet,
rpacket_get_current_shell_id (packet) +
rpacket_get_next_shell_id (packet));
}
else if (rpacket_get_next_shell_id (packet) == 1)
{
rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
}
else if ((storage->reflective_inner_boundary == 0) ||
(rk_double (mt_state) > storage->inner_boundary_albedo))
{
rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
}
else
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
rpacket_set_mu (packet, rk_double (mt_state));
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
montecarlo_one_packet (storage, packet, -2, mt_state);
}
}
}
void
montecarlo_thomson_scatter (rpacket_t * packet, storage_model_t * storage,
double distance, rk_state *mt_state)
{
move_packet (packet, storage, distance);
double doppler_factor = rpacket_doppler_factor (packet, storage);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
rpacket_reset_tau_event (packet, mt_state);
storage->last_interaction_type[rpacket_get_id (packet)] = 1;
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
montecarlo_one_packet (storage, packet, 1, mt_state);
}
}
void
montecarlo_bound_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state)
{
// current position in list of continuum edges -> indicates which bound-free processes are possible
int64_t ccontinuum = rpacket_get_current_continuum_id (packet);
// Determine in which continuum the bf-absorption occurs
double chi_bf = rpacket_get_chi_boundfree (packet);
double zrand = rk_double (mt_state);
double zrand_x_chibf = zrand * chi_bf;
while ((ccontinuum < storage->no_of_edges - 1) && (packet->chi_bf_tmp_partial[ccontinuum] <= zrand_x_chibf))
{
ccontinuum++;
}
rpacket_set_current_continuum_id (packet, ccontinuum);
/* For consistency reasons the branching between ionization and thermal energy is determined using the
comoving frequency at the initial position instead of the frequency at the point of interaction */
double comov_nu = rpacket_get_nu (packet) * rpacket_doppler_factor (packet, storage);
/* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation
in the co-moving frame. */
move_packet (packet, storage, distance);
double old_doppler_factor = rpacket_doppler_factor (packet, storage);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
storage->last_interaction_type[rpacket_get_id (packet)] = 3; // last interaction was a bf-absorption
storage->last_line_interaction_in_id[rpacket_get_id (packet)] = ccontinuum;
// Convert the rpacket to thermal or ionization energy
zrand = rk_double (mt_state);
int64_t activate_level = (zrand < storage->continuum_list_nu[ccontinuum] / comov_nu) ?
storage->cont_edge2macro_level[ccontinuum] : storage->kpacket2macro_level;
rpacket_set_macro_atom_activation_level (packet, activate_level);
macro_atom (packet, storage, mt_state);
}
void
montecarlo_free_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state)
{
/* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation
in the co-moving frame. */
move_packet (packet, storage, distance);
double old_doppler_factor = rpacket_doppler_factor (packet, storage);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
storage->last_interaction_type[rpacket_get_id (packet)] = 4; // last interaction was a ff-absorption
// Create a k-packet
rpacket_set_macro_atom_activation_level (packet, storage->kpacket2macro_level);
macro_atom (packet, storage, mt_state);
}
double
sample_nu_free_free (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
int64_t shell_id = rpacket_get_current_shell_id (packet);
double T = storage->t_electrons[shell_id];
double zrand = rk_double (mt_state);
return -KB * T / H * log(zrand); // Lucy 2003 MC II Eq.41
}
double
sample_nu_free_bound (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
int64_t continuum_id = rpacket_get_current_continuum_id (packet);
double th_frequency = storage->continuum_list_nu[continuum_id];
int64_t shell_id = rpacket_get_current_shell_id (packet);
double T = storage->t_electrons[shell_id];
double zrand = rk_double (mt_state);
return th_frequency * (1 - (KB * T / H / th_frequency * log(zrand))); // Lucy 2003 MC II Eq.26
}
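/* Both samplers above use inverse-transform sampling of an exponential:
   for a density proportional to exp(-h*nu / (k*T)) the inverted CDF gives
   nu = -(k*T / h) * ln(z) with z uniform in (0, 1]; the free-bound sampler
   applies the same draw to the offset above the threshold frequency,
   nu = nu_th - (k*T / h) * ln(z). */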
void
montecarlo_line_scatter (rpacket_t * packet, storage_model_t * storage,
double distance, rk_state *mt_state)
{
uint64_t next_line_id = rpacket_get_next_line_id (packet);
uint64_t line2d_idx = next_line_id +
storage->no_of_lines * rpacket_get_current_shell_id (packet);
if (rpacket_get_virtual_packet (packet) == 0)
{
increment_j_blue_estimator (packet, storage, distance, line2d_idx);
increment_Edotlu_estimator (packet, storage, distance, line2d_idx);
}
double tau_line =
storage->line_lists_tau_sobolevs[line2d_idx];
double tau_continuum = rpacket_get_chi_continuum(packet) * distance;
double tau_combined = tau_line + tau_continuum;
//rpacket_set_next_line_id (packet, rpacket_get_next_line_id (packet) + 1);
if (next_line_id + 1 == storage->no_of_lines)
{
rpacket_set_last_line (packet, true);
}
if (rpacket_get_virtual_packet (packet) > 0)
{
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) + tau_line);
rpacket_set_next_line_id (packet, next_line_id + 1);
test_for_close_line (packet, storage);
}
else if (rpacket_get_tau_event (packet) < tau_combined)
{ // Line absorption occurs
move_packet (packet, storage, distance);
double old_doppler_factor = rpacket_doppler_factor (packet, storage);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
storage->last_interaction_in_nu[rpacket_get_id (packet)] =
rpacket_get_nu (packet);
storage->last_line_interaction_in_id[rpacket_get_id (packet)] =
next_line_id;
storage->last_line_interaction_shell_id[rpacket_get_id (packet)] =
rpacket_get_current_shell_id (packet);
storage->last_interaction_type[rpacket_get_id (packet)] = 2;
if (storage->line_interaction_id == 0)
{
line_emission (packet, storage, next_line_id, mt_state);
}
else if (storage->line_interaction_id >= 1)
{
rpacket_set_macro_atom_activation_level (packet,
storage->line2macro_level_upper[next_line_id]);
macro_atom (packet, storage, mt_state);
}
}
else
{ // Packet passes line without interacting
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) - tau_line);
rpacket_set_next_line_id (packet, next_line_id + 1);
packet->compute_chi_bf = false;
test_for_close_line (packet, storage);
}
}
void
line_emission (rpacket_t * packet, storage_model_t * storage, int64_t emission_line_id, rk_state *mt_state)
{
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
storage->last_line_interaction_out_id[rpacket_get_id (packet)] = emission_line_id;
if (storage->cont_status == CONTINUUM_ON)
{
storage->last_interaction_out_type[rpacket_get_id (packet)] = 2;
}
rpacket_set_nu (packet,
storage->line_list_nu[emission_line_id] * inverse_doppler_factor);
rpacket_set_nu_line (packet, storage->line_list_nu[emission_line_id]);
rpacket_set_next_line_id (packet, emission_line_id + 1);
rpacket_reset_tau_event (packet, mt_state);
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
bool virtual_close_line = false;
if (!rpacket_get_last_line (packet) &&
fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
rpacket_get_nu_line (packet)) <
(rpacket_get_nu_line (packet)* 1e-7))
{
virtual_close_line = true;
}
// QUESTIONABLE!!!
bool old_close_line = rpacket_get_close_line (packet);
rpacket_set_close_line (packet, virtual_close_line);
montecarlo_one_packet (storage, packet, 1, mt_state);
rpacket_set_close_line (packet, old_close_line);
virtual_close_line = false;
}
test_for_close_line (packet, storage);
}
void test_for_close_line (rpacket_t * packet, const storage_model_t * storage)
{
if (!rpacket_get_last_line (packet) &&
fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
rpacket_get_nu_line (packet)) < (rpacket_get_nu_line (packet)*
1e-7))
{
rpacket_set_close_line (packet, true);
}
}
void
continuum_emission (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state,
pt2sample_nu sample_nu_continuum, int64_t emission_type_id)
{
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
double nu_comov = sample_nu_continuum (packet, storage, mt_state);
rpacket_set_nu (packet, nu_comov * inverse_doppler_factor);
rpacket_reset_tau_event (packet, mt_state);
storage->last_interaction_out_type[rpacket_get_id (packet)] = emission_type_id;
// Have to find current position in line list
int64_t current_line_id;
  line_search (storage->line_list_nu, nu_comov, storage->no_of_lines, &current_line_id);
bool last_line = (current_line_id == storage->no_of_lines);
rpacket_set_last_line (packet, last_line);
rpacket_set_next_line_id (packet, current_line_id);
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
montecarlo_one_packet (storage, packet, 1, mt_state);
}
}
static void
montecarlo_compute_distances (rpacket_t * packet, storage_model_t * storage)
{
// Check if the last line was the same nu as the current line.
if (rpacket_get_close_line (packet))
{
// If so set the distance to the line to 0.0
rpacket_set_d_line (packet, 0.0);
// Reset close_line.
rpacket_set_close_line (packet, false);
}
else
{
compute_distance2boundary (packet, storage);
compute_distance2line (packet, storage);
// FIXME MR: return status of compute_distance2line() is ignored
compute_distance2continuum (packet, storage);
}
}
montecarlo_event_handler_t
get_event_handler (rpacket_t * packet, storage_model_t * storage,
double *distance, rk_state *mt_state)
{
montecarlo_compute_distances (packet, storage);
double d_boundary = rpacket_get_d_boundary (packet);
double d_continuum = rpacket_get_d_continuum (packet);
double d_line = rpacket_get_d_line (packet);
montecarlo_event_handler_t handler;
if (d_line <= d_boundary && d_line <= d_continuum)
{
*distance = d_line;
handler = &montecarlo_line_scatter;
}
else if (d_boundary <= d_continuum)
{
*distance = d_boundary;
handler = &move_packet_across_shell_boundary;
}
else
{
*distance = d_continuum;
handler = montecarlo_continuum_event_handler (packet, storage, mt_state);
}
return handler;
}
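/* The continuum event type is selected by comparing a uniform draw scaled by
   the total continuum opacity against the cumulative opacities: Thomson
   scattering if z * chi_continuum < chi_electron, bound-free absorption if it
   falls in [chi_electron, chi_electron + chi_boundfree), and free-free
   absorption otherwise. */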
montecarlo_event_handler_t
montecarlo_continuum_event_handler (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state)
{
if (storage->cont_status)
{
double zrand_x_chi_cont = rk_double (mt_state) * rpacket_get_chi_continuum (packet);
double chi_th = rpacket_get_chi_electron (packet);
double chi_bf = rpacket_get_chi_boundfree (packet);
if (zrand_x_chi_cont < chi_th)
{
return &montecarlo_thomson_scatter;
}
else if (zrand_x_chi_cont < chi_th + chi_bf)
{
return &montecarlo_bound_free_scatter;
}
else
{
return &montecarlo_free_free_scatter;
}
}
else
{
return &montecarlo_thomson_scatter;
}
}
int64_t
montecarlo_one_packet_loop (storage_model_t * storage, rpacket_t * packet,
int64_t virtual_packet, rk_state *mt_state)
{
rpacket_set_tau_event (packet, 0.0);
rpacket_set_nu_line (packet, 0.0);
rpacket_set_virtual_packet (packet, virtual_packet);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_IN_PROCESS);
// Initializing tau_event if it's a real packet.
if (virtual_packet == 0)
{
rpacket_reset_tau_event (packet,mt_state);
}
// For a virtual packet tau_event is the sum of all the tau's that the packet passes.
while (rpacket_get_status (packet) == TARDIS_PACKET_STATUS_IN_PROCESS)
{
// Check if we are at the end of line list.
if (!rpacket_get_last_line (packet))
{
rpacket_set_nu_line (packet,
storage->
line_list_nu[rpacket_get_next_line_id
(packet)]);
}
double distance;
get_event_handler (packet, storage, &distance, mt_state) (packet, storage,
distance, mt_state);
if (virtual_packet > 0 && rpacket_get_tau_event (packet) > 10.0)
{
rpacket_set_tau_event (packet, 100.0);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
}
}
if (virtual_packet > 0)
{
rpacket_set_energy (packet,
rpacket_get_energy (packet) * exp (-1.0 *
rpacket_get_tau_event
(packet)));
}
return rpacket_get_status (packet) ==
TARDIS_PACKET_STATUS_REABSORBED ? 1 : 0;
}
void
montecarlo_main_loop(storage_model_t * storage, int64_t virtual_packet_flag, int nthreads, unsigned long seed)
{
int64_t finished_packets = 0;
storage->virt_packet_count = 0;
#ifdef WITH_VPACKET_LOGGING
storage->virt_packet_nus = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_energies = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_last_interaction_in_nu = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_last_interaction_type = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_last_line_interaction_in_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_last_line_interaction_out_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_array_size = storage->no_of_packets;
#endif // WITH_VPACKET_LOGGING
#ifdef WITHOPENMP
omp_set_dynamic(0);
if (nthreads > 0)
{
omp_set_num_threads(nthreads);
}
#pragma omp parallel firstprivate(finished_packets)
{
rk_state mt_state;
rk_seed (seed + omp_get_thread_num(), &mt_state);
#pragma omp master
{
fprintf(stderr, "Running with OpenMP - %d threads\n", omp_get_num_threads());
print_progress(0, storage->no_of_packets);
}
#else
rk_state mt_state;
rk_seed (seed, &mt_state);
fprintf(stderr, "Running without OpenMP\n");
#endif
int64_t chi_bf_tmp_size = (storage->cont_status) ? storage->no_of_edges : 0;
double *chi_bf_tmp_partial = safe_malloc(sizeof(double) * chi_bf_tmp_size);
#pragma omp for
for (int64_t packet_index = 0; packet_index < storage->no_of_packets; ++packet_index)
{
int reabsorbed = 0;
rpacket_t packet;
rpacket_set_id(&packet, packet_index);
rpacket_init(&packet, storage, packet_index, virtual_packet_flag, chi_bf_tmp_partial);
if (virtual_packet_flag > 0)
{
reabsorbed = montecarlo_one_packet(storage, &packet, -1, &mt_state);
}
reabsorbed = montecarlo_one_packet(storage, &packet, 0, &mt_state);
storage->output_nus[packet_index] = rpacket_get_nu(&packet);
if (reabsorbed == 1)
{
storage->output_energies[packet_index] = -rpacket_get_energy(&packet);
}
else
{
storage->output_energies[packet_index] = rpacket_get_energy(&packet);
}
if ( ++finished_packets%100 == 0 )
{
#ifdef WITHOPENMP
          // WARNING: This only works with a static scheduler and gives an approximation of progress.
// The alternative would be to have a shared variable but that could potentially decrease performance when using many threads.
if (omp_get_thread_num() == 0 )
print_progress(finished_packets * omp_get_num_threads(), storage->no_of_packets);
#else
print_progress(finished_packets, storage->no_of_packets);
#endif
}
}
free(chi_bf_tmp_partial);
#ifdef WITHOPENMP
}
#endif
print_progress(storage->no_of_packets, storage->no_of_packets);
fprintf(stderr,"\n");
}
|
GB_binop__max_fp64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp64)
// A*D function (colscale): GB (_AxD__max_fp64)
// D*A function (rowscale): GB (_DxB__max_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__max_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__max_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp64)
// C=scalar+B GB (_bind1st__max_fp64)
// C=scalar+B' GB (_bind1st_tran__max_fp64)
// C=A+scalar GB (_bind2nd__max_fp64)
// C=A'+scalar GB (_bind2nd_tran__max_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = fmax (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmax (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_FP64 || GxB_NO_MAX_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = fmax (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = fmax (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmax (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmax (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
proj.c
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sched.h>
#include <omp.h>
#include "allocate.h"
#include "data.h"
#include "prepro.h"
#include "proj.h"
void createSinogram(struct Sinogram *sinogram)
{
sinogram->sino = (ENTRY *)get_spc((sinogram->geom_info.Nr)*(sinogram->geom_info.Nc)*(sinogram->geom_info.Nv), sizeof(ENTRY));
}
void freeSinogram(struct Sinogram *sinogram)
{
free(sinogram->sino);
}
void createImage(struct Image *image)
{
image->img = (ENTRY *)get_spc((image->img_info.Nx)*(image->img_info.Ny)*(image->img_info.Nz), sizeof(ENTRY));
}
void freeImage(struct Image *image)
{
free(image->img);
}
void fillGeomInfo(struct GeomInfo *geom_info) /* fill in the intermediate variables */
{
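/* derived quantities, roughly: alphac0 = fan angle of channel 0 relative to the
 * central ray (including the detector offset del_alphac); Del_zs = source z advance
 * per view along the helix; detc/detr = total detector extent in angle and in rows;
 * half_detr = half the detector height (to the outermost row centers);
 * cone_zbuffer = z margin covering the cone angle across the field of view */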
geom_info->alphac0 = -(geom_info->Nc-1.0)*(geom_info->Del_alphac)/2.0 + geom_info->del_alphac;
geom_info->Del_zs = (geom_info->u)*(geom_info->Del_dr)*(geom_info->Del_beta)/(2.0*PI);
geom_info->detc = (geom_info->Nc)*(geom_info->Del_alphac);
geom_info->detr = (geom_info->Nr)*(geom_info->Del_dr);
geom_info->half_detr = (geom_info->Nr-1.0)*(geom_info->Del_dr)/2.0;
geom_info->cone_zbuffer= geom_info->detr*(geom_info->r_si+geom_info->fov/2.0)/(2.0*geom_info->r_sd);
}
void fillImgInfo(struct ImgInfo *img_info) /* fill in the intermediate variables */
{
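/* (x0,y0,z0) is the center of voxel (0,0,0), so the voxel grid is centered on (xc,yc,zc) */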
img_info->x0 = img_info->xc - (img_info->Del_xy)*(img_info->Nx-1)/2.0;
img_info->y0 = img_info->yc - (img_info->Del_xy)*(img_info->Ny-1)/2.0;
img_info->z0 = img_info->zc - (img_info->Del_z)*(img_info->Nz-1)/2.0;
}
/* sjk: check information for consistency */
void checkInfo(struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
float x,y,dist;
/* find pixel distance furthest from iso (just check corners) */
x = img_info->x0 - img_info->Del_xy/2.0;
y = img_info->y0 - img_info->Del_xy/2.0;
dist = sqrt(x*x + y*y);
x = img_info->x0 - img_info->Del_xy/2.0 + img_info->Nx*img_info->Del_xy; /* far corner: add the grid extent, not the voxel count */
y = img_info->y0 - img_info->Del_xy/2.0;
dist = max(dist,sqrt(x*x+y*y));
x = img_info->x0 - img_info->Del_xy/2.0;
y = img_info->y0 - img_info->Del_xy/2.0 + img_info->Ny*img_info->Del_xy;
dist = max(dist,sqrt(x*x+y*y));
x = img_info->x0 - img_info->Del_xy/2.0 + img_info->Nx*img_info->Del_xy;
y = img_info->y0 - img_info->Del_xy/2.0 + img_info->Ny*img_info->Del_xy;
dist = max(dist,sqrt(x*x+y*y));
if(dist>geom_info->r_si) {
fprintf(stdout,"ERROR: You have pixels outside the source radius.");
exit(-1);
}
}
void createSourceLocInfo(
struct SourceLocInfo *source_loc_info,
struct GeomInfo *geom_info)
{
source_loc_info->beta = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
source_loc_info->xs = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
source_loc_info->ys = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
source_loc_info->zs = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
}
void compSourceLocInfo(
struct SourceLocInfo *source_loc_info,
struct GeomInfo *geom_info)
{
int iv; /* view index */
for (iv = 0; iv < geom_info->Nv; iv++)
{
source_loc_info->beta[iv] = geom_info->beta0 + iv*(geom_info->Del_beta);
source_loc_info->xs[iv] = (geom_info->r_si)*cos(source_loc_info->beta[iv]);
source_loc_info->ys[iv] = (geom_info->r_si)*sin(source_loc_info->beta[iv]);
source_loc_info->zs[iv] = iv*(geom_info->Del_zs); /* assume z start from 0 */
}
}
void freeSourceLocInfo(struct SourceLocInfo *source_loc_info)
{
free(source_loc_info->beta);
free(source_loc_info->xs);
free(source_loc_info->ys);
free(source_loc_info->zs);
}
void createViewXYInfo(
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info)
{
view_xy_info->ic_start = (CHANNEL *)get_spc(geom_info->Nv, sizeof(CHANNEL));
view_xy_info->ic_num = (PROCHANNEL *)get_spc(geom_info->Nv, sizeof(PROCHANNEL));
view_xy_info->Mag = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
view_xy_info->Wr = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
view_xy_info->B = (ENTRY **)get_spc(geom_info->Nv, sizeof(ENTRY *));
}
void compViewXYInfo(
float x,
float y,
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info,
struct ImgInfo *img_info,
struct SourceLocInfo *source_loc_info)
{
int iv; /* view index */
float theta; /* slope of the ray */
float theta_td; /* adjusted theta, in [-PI/4, PI/4] */
float costh; /* cos(theta_td) */
float alphaj; /* voxel angle relative to ray through iso */
float alphaj_td; /* alphaj_td = (alphaj + PI) mod 2*PI - PI */
float alpha_min;
float alpha_max;
float r_sv; /* source to voxel */
float Wc; /* projection angle width */
float del_c; /* angle between ray through voxel center and ray through detector center */
float Bij;
CHANNEL ic_end;
PROCHANNEL p;
int ic; /* channel index */
for (iv = 0; iv < geom_info->Nv; iv++)
{
theta = atan2((source_loc_info->ys[iv]-y), (source_loc_info->xs[iv]-x));
if (theta >= -PI/4.0)
{
theta_td = fmod((theta + PI/4.0), (PI/2.0)) - (PI/4.0);
}
else
{
theta_td = fmod((theta + PI/4.0), (PI/2.0)) + (PI/4.0);
}
costh = cos(theta_td);
alphaj = theta - source_loc_info->beta[iv];
alphaj_td = adjust(alphaj);
r_sv = sqrt((source_loc_info->xs[iv]-x)*(source_loc_info->xs[iv]-x) + (source_loc_info->ys[iv]-y)*(source_loc_info->ys[iv]-y));
view_xy_info->Mag[iv] = (geom_info->r_sd)/r_sv;
view_xy_info->Wr[iv] = (img_info->Del_z)*(view_xy_info->Mag[iv]);
Wc = (img_info->Del_xy)*costh/r_sv;
alpha_min = alphaj_td - geom_info->alphac0 - (Wc - geom_info->Del_alphac)/2.0;
alpha_max = alphaj_td - geom_info->alphac0 + (Wc + geom_info->Del_alphac)/2.0;
if (alpha_max < 0 || alpha_min > geom_info->detc)
{
view_xy_info->ic_num[iv] = 0;
}
else
{
view_xy_info->ic_start[iv] = (CHANNEL)max((CHANNEL)floor(alpha_min/(geom_info->Del_alphac)), 0);
ic_end = (CHANNEL)min((CHANNEL)floor(alpha_max/(geom_info->Del_alphac)), (CHANNEL)(geom_info->Nc-1));
view_xy_info->ic_num[iv] = ((PROCHANNEL)(ic_end - view_xy_info->ic_start[iv] + 1));
}
view_xy_info->B[iv] = (ENTRY *)get_spc((int)(view_xy_info->ic_num[iv]), sizeof(ENTRY));
for (p = 0; p < view_xy_info->ic_num[iv]; p++)
{
ic = (int)(view_xy_info->ic_start[iv] + p);
del_c = adjust(alphaj - (ic*(geom_info->Del_alphac) + geom_info->alphac0));
Bij = clip(0.0, ((Wc+(geom_info->Del_alphac))/2.0)-fabs(del_c), min(Wc, (geom_info->Del_alphac)));
Bij *= ((img_info->Del_xy)/((geom_info->Del_alphac)*costh));
view_xy_info->B[iv][p] = Bij;
}
}
}
void freeViewXYInfoB(
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info)
{
int iv; /* view index */
for (iv = 0; iv < geom_info->Nv; iv++)
{
free(view_xy_info->B[iv]);
}
}
void freeViewXYInfo(
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info)
{
free(view_xy_info->ic_start);
free(view_xy_info->ic_num);
free(view_xy_info->Mag);
free(view_xy_info->Wr);
free(view_xy_info->B);
}
void createViewXYZInfo(
struct ViewXYZInfo *view_xyz_info,
struct GeomInfo *geom_info)
{
view_xyz_info->ir_start = (ROW *)get_spc(geom_info->Nv, sizeof(ROW));
view_xyz_info->ir_num = (PROROW *)get_spc(geom_info->Nv, sizeof(PROROW));
}
void compViewXYZInfo(
float z,
struct ViewXYZInfo *view_xyz_info,
struct GeomInfo *geom_info,
struct ImgInfo *img_info,
struct SourceLocInfo *source_loc_info,
struct ViewXYInfo *view_xy_info)
{
int iv; /* view index */
float d;
float d_min;
float d_max;
ROW ir_end;
int iv_end; /* sjk */
if(0) /* sjk: this block is replaced with the lines following */
{
/* calculate d */
d = geom_info->Nr*geom_info->Del_dr*(geom_info->r_si+geom_info->fov/2.0)/(2.0*geom_info->r_sd);
if (z < source_loc_info->zs[0]-d || z > source_loc_info->zs[geom_info->Nv-1]+d)
{
view_xyz_info->iv_num = 0;
}
else
{
for (iv = 0; iv < geom_info->Nv; iv++)
{
if (z >= source_loc_info->zs[iv]-d && z <= source_loc_info->zs[iv]+d)
{
view_xyz_info->iv_start = iv;
break;
}
}
view_xyz_info->iv_num = 0;
for (iv = 0; iv < geom_info->Nv; iv++)
{
if (z >= source_loc_info->zs[iv]-d && z <= source_loc_info->zs[iv]+d)
{
view_xyz_info->iv_num++;
}
}
/*for (; z >= source_loc_info->zs[iv]-d && z <= source_loc_info->zs[iv]+d; iv++)
{
view_xyz_info->iv_num++;
}*/
}
}
/* sjk: this block replaces the above, finding the iv range in closed form */
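/* with zs[iv] = iv*Del_zs (zs[0] = 0), a view can reach this z slab only when
 * |z - zs[iv]| <= Del_z/2 + cone_zbuffer, so iv is bracketed by the floor/ceil of
 * (z -/+ (Del_z/2 + cone_zbuffer))/Del_zs computed below */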
view_xyz_info->iv_start=max(0,(int)floor((z-img_info->Del_z/2.0-geom_info->cone_zbuffer)/geom_info->Del_zs));
iv_end = min(geom_info->Nv-1,(int)ceil((z+img_info->Del_z/2.0+geom_info->cone_zbuffer)/geom_info->Del_zs));
if((iv_end<0) || (view_xyz_info->iv_start > (geom_info->Nv-1)))
view_xyz_info->iv_num=0;
else
view_xyz_info->iv_num= iv_end - view_xyz_info->iv_start + 1;
/* sjk: moved this block down so that we don't have to go through all views */
for (iv = view_xyz_info->iv_start; iv <= iv_end; iv++) /* sjk */
{
d_min = (geom_info->half_detr+geom_info->Del_dr/2.0-view_xy_info->Mag[iv]*(z-source_loc_info->zs[iv]+img_info->Del_z/2.0));
d_max = (geom_info->half_detr+geom_info->Del_dr/2.0-view_xy_info->Mag[iv]*(z-source_loc_info->zs[iv]-img_info->Del_z/2.0));
if (d_max < 0 || d_min > geom_info->detr)
{
view_xyz_info->ir_num[iv] = 0;
}
else
{
view_xyz_info->ir_start[iv] = (ROW)max((ROW)floor(d_min/(geom_info->Del_dr)), 0);
ir_end = (ROW)min((ROW)floor(d_max/(geom_info->Del_dr)), (ROW)(geom_info->Nr-1));
view_xyz_info->ir_num[iv] = ((PROROW)(ir_end - view_xyz_info->ir_start[iv] + 1));
}
}
}
void freeViewXYZInfo(struct ViewXYZInfo *view_xyz_info)
{
free(view_xyz_info->ir_start);
free(view_xyz_info->ir_num);
}
void createACol(struct ACol *A_col, int length)
{
A_col->index = (int *)get_spc(length, sizeof(int));
A_col->val = (ENTRY *)get_spc(length, sizeof(ENTRY));
A_col->array_length = length; /* sjk */
}
/* sjk */
void increaseAColLength(struct ACol *A_col) /* increase array length by 10% */
{
int *index,i,new_length;
ENTRY *val;
new_length=1.1*A_col->array_length;
if(new_length <= A_col->array_length) /* guard: 10% of a small array can truncate to zero growth */
new_length = A_col->array_length+1;
index = (int *)get_spc(new_length,sizeof(int)); /* create new array */
for(i=0;i<A_col->array_length;i++) /* copy old into new array */
index[i] = A_col->index[i];
free(A_col->index); /* free old array */
A_col->index = index; /* point to new array */
val = (ENTRY *)get_spc(new_length,sizeof(ENTRY));
for(i=0;i<A_col->array_length;i++)
val[i] = A_col->val[i];
free(A_col->val);
A_col->val= val;
A_col->array_length= new_length;
}
void freeACol(struct ACol *A_col)
{
free(A_col->index);
free(A_col->val);
}
void compAColxyz(
int jjx,
int jjy,
int jjz,
struct GeomInfo *geom_info,
struct ImgInfo *img_info,
struct ACol *col_xyz)
{
static int first = 1;
static struct ACol A_col;
static struct ACol *A_col_pt;
static struct ACol ***A_array; /* store the whole A matrix, each column is A[jx][jy][jz] */
int jx, jy, jz; /* voxel indices */
int iv, ic, ir, l, p, q; /* detector indices */
int r;
float x, y, z; /* voxel coordinate */
float del_r;
float Atmp, Cij;
struct SourceLocInfo source_loc_info;
struct ViewXYInfo view_xy_info;
struct ViewXYZInfo view_xyz_info;
if (first == 1) /* if the function is first called, precompute the whole A matrix and store it */
{
first = 0;
A_array = (struct ACol ***)multialloc(sizeof(struct ACol), 3, img_info->Nx, img_info->Ny, img_info->Nz);/* no allocation for arrays */
createACol(&A_col, COL_LEN); /* TODO, COL_LEN hard-coded */
/* allocate precomputing structure */
createSourceLocInfo(&source_loc_info, geom_info);
compSourceLocInfo(&source_loc_info, geom_info); /* populate SourceLocInfo structure */
createViewXYInfo(&view_xy_info, geom_info);
createViewXYZInfo(&view_xyz_info, geom_info);
fprintf(stdout, "\ninitializing A matrix ...\n");
for (jx = 0 ; jx < img_info->Nx; jx++)
{
x = img_info->x0 + jx*img_info->Del_xy;
for (jy = 0; jy < img_info->Ny; jy++)
{
y = img_info->y0 + jy*img_info->Del_xy;
compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, &source_loc_info); /* populate ViewXYInfo structure */
for (jz = 0; jz < img_info->Nz; jz++)
{
z = img_info->z0 + jz*img_info->Del_z;
compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, &source_loc_info, &view_xy_info); /* populate ViewXYZInfo structure */
A_col.n_index = 0;
for (l = 0; l < view_xyz_info.iv_num; l++) /* view loop */
{
iv = view_xyz_info.iv_start + l;
for (p = 0; p < view_xy_info.ic_num[iv]; p++) /* channel loop */
{
ic = view_xy_info.ic_start[iv] + p;
for (q = 0; q < view_xyz_info.ir_num[iv]; q++) /* row loop */
{
ir = view_xyz_info.ir_start[iv] + q;
/* ATTENTION! CHANGE SIGN HERE ROW 0 IS CLOSEST !! */
del_r = view_xy_info.Mag[iv]*(z-source_loc_info.zs[iv]) + ir*geom_info->Del_dr - geom_info->half_detr;
Cij = clip(0.0, ((view_xy_info.Wr[iv]+geom_info->Del_dr)/2.0)-fabs(del_r), min(view_xy_info.Wr[iv], geom_info->Del_dr)); /* sjk: fixed typo */
Cij *= (sqrt((source_loc_info.xs[iv]-x)*(source_loc_info.xs[iv]-x)+(source_loc_info.ys[iv]-y)*(source_loc_info.ys[iv]-y)+(source_loc_info.zs[iv]-z)*(source_loc_info.zs[iv]-z)));
Cij /= (sqrt((source_loc_info.xs[iv]-x)*(source_loc_info.xs[iv]-x) +(source_loc_info.ys[iv]-y)*(source_loc_info.ys[iv]-y))*geom_info->Del_dr);
Atmp = view_xy_info.B[iv][p]*Cij;
if (Atmp > EPSILON) /* if nonzero entry */
{
A_col.index[A_col.n_index] = iv*geom_info->Nc*geom_info->Nr + ic*geom_info->Nr + ir;
A_col.val[A_col.n_index] = Atmp;
A_col.n_index++;
}
}
}
}
/* here we finish computing one column of A for a specific (x,y,z) voxel */
/* store it in A_array */
A_array[jx][jy][jz].index = (int *)get_spc(A_col.n_index, sizeof(int));
A_array[jx][jy][jz].val = (ENTRY *)get_spc(A_col.n_index, sizeof(ENTRY));
A_array[jx][jy][jz].n_index = A_col.n_index;
for (r = 0; r < A_col.n_index; r++)
{
A_array[jx][jy][jz].index[r] = (int)A_col.index[r];
A_array[jx][jy][jz].val[r] = (ENTRY)A_col.val[r];
}
}
freeViewXYInfoB(&view_xy_info, geom_info);
}
}
freeViewXYZInfo(&view_xyz_info);
freeViewXYInfo(&view_xy_info, geom_info);
freeSourceLocInfo(&source_loc_info);
fprintf(stdout, "finish computing A matrix!\n");
}
A_col_pt = &A_array[jjx][jjy][jjz];
col_xyz->n_index = A_col_pt->n_index;
for (r = 0; r < A_col_pt->n_index; r++)
{
col_xyz->index[r] = A_col_pt->index[r];
col_xyz->val[r] = A_col_pt->val[r];
}
}
void compAColxyzOnFly(
float x,
float y,
float z,
struct GeomInfo *geom_info,
struct SourceLocInfo *source_loc_info,
struct ViewXYInfo *view_xy_info,
struct ViewXYZInfo *view_xyz_info,
struct ACol *A_col)
{
int iv, ic, ir, l, p, q;
float del_r;
float Cij;
float Atmp;
A_col->n_index = 0;
for (l = 0; l < view_xyz_info->iv_num; l++) /* view loop */
{
iv = view_xyz_info->iv_start + l;
for (p = 0; p < view_xy_info->ic_num[iv]; p++) /* channel loop */
{
ic = view_xy_info->ic_start[iv] + p;
for (q = 0; q < view_xyz_info->ir_num[iv]; q++) /* row loop */
{
ir = view_xyz_info->ir_start[iv] + q;
/* ATTENTION!! CHANGE SIGN HERE! ROW 0 IS CLOSEST */
del_r = view_xy_info->Mag[iv]*(z-source_loc_info->zs[iv]) + ir*geom_info->Del_dr - geom_info->half_detr;
Cij = clip(0.0, (((view_xy_info->Wr[iv]+geom_info->Del_dr)/2.0)-fabs(del_r)), min(view_xy_info->Wr[iv], geom_info->Del_dr));
Cij *= (sqrt((source_loc_info->xs[iv]-x)*(source_loc_info->xs[iv]-x) + (source_loc_info->ys[iv]-y)*(source_loc_info->ys[iv]-y) + (source_loc_info->zs[iv]-z)*(source_loc_info->zs[iv]-z)));
Cij /= (sqrt((source_loc_info->xs[iv]-x)*(source_loc_info->xs[iv]-x) + (source_loc_info->ys[iv]-y)*(source_loc_info->ys[iv]-y))*geom_info->Del_dr);
Atmp = view_xy_info->B[iv][p]*Cij;
if (Atmp > EPSILON) /* non-zero entry */
{
A_col->index[A_col->n_index] = iv*geom_info->Nc*geom_info->Nr + ic*geom_info->Nr + ir;
A_col->val[A_col->n_index] = Atmp;
A_col->n_index++;
/* sjk: */
if(A_col->n_index >= A_col->array_length)
{
fprintf(stdout,"Increasing size of A column by 10%% (%d).\n",(int)1.1*A_col->array_length);
increaseAColLength(A_col);
}
}
}
}
}
}
void forwardProject(ENTRY *AX, ENTRY *X, unsigned short *AX_mask, char **recon_mask, struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
int i, t;
struct SourceLocInfo source_loc_info;
/* initialize projection */
for (i = 0; i < (geom_info->Nr)*(geom_info->Nc)*(geom_info->Nv); i++)
{
AX[i] = 0.0;
}
if(AX_mask != NULL)
{
for (i = 0; i < (geom_info->Nr)*(geom_info->Nc)*(geom_info->Nv); i++)
AX_mask[i] = 0;
}
/* allocate precomputing structures */
createSourceLocInfo(&source_loc_info, geom_info);
compSourceLocInfo(&source_loc_info, geom_info);
/* for each voxel in the image */
fprintf(stdout, "\nforward projecting (parallelized version) ...\n");
#pragma omp parallel
{
paraForwardProject(geom_info,img_info,&source_loc_info,X,AX,AX_mask,recon_mask);
}
fprintf(stdout, "\nfinish forward projection!\n");
freeSourceLocInfo(&source_loc_info);
}
void paraForwardProject(struct GeomInfo *geom_info,struct ImgInfo *img_info,struct SourceLocInfo *source_loc_info,ENTRY *X,ENTRY *AX,unsigned short *AX_mask,char **recon_mask)
{
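/* each thread handles a contiguous slab of jz indices (split by thread id below);
 * different voxels can project onto the same detector cell, so the scatter-add
 * into AX is serialized with the omp critical section */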
int tid = omp_get_thread_num();
int indx,offset1,offset2; /* sjk */
int jx, jy, jz, jzmax, Nyz, r;
float x, y, z;
struct ACol col_xyz;
struct ViewXYInfo view_xy_info;
struct ViewXYZInfo view_xyz_info;
createACol(&col_xyz, COL_LEN); /* TODO COL_LEN hard-coded */
createViewXYInfo(&view_xy_info, geom_info);
createViewXYZInfo(&view_xyz_info, geom_info);
fprintf(stdout,"tid is %d omp_get_num_threads() %d\n",tid,omp_get_num_threads());
jzmax = (tid+1)*img_info->Nz/omp_get_num_threads();
if (tid == (omp_get_num_threads()-1))
{
jzmax = img_info->Nz;
}
Nyz = img_info->Ny*img_info->Nz;
for (jx = 0; jx < img_info->Nx; jx++)
{
x = img_info->x0 + jx*img_info->Del_xy;
offset1=jx*Nyz;
for (jy = 0; jy < img_info->Ny; jy++)
{
if(recon_mask[jx][jy])
/* sjk: everything outside mask has been set to 0, and never updated */
{
y = img_info->y0 + jy*img_info->Del_xy;
compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, source_loc_info);
offset2=offset1+jy*img_info->Nz;
for (jz = tid*img_info->Nz/omp_get_num_threads(); jz < jzmax; jz++)
{
indx=offset2+jz;
if(X[indx]>0)
{
z = img_info->z0 + jz*img_info->Del_z;
compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, source_loc_info, &view_xy_info);
compAColxyzOnFly(x, y, z, geom_info, source_loc_info, &view_xy_info, &view_xyz_info, &col_xyz);
#pragma omp critical
{
for (r = 0; r < col_xyz.n_index; r++)
{
AX[col_xyz.index[r]] += col_xyz.val[r]*X[indx];
if(AX_mask != NULL)
{
if(X[indx]>hu2miu(3000,MIU_AIR,MIU_WATER))
AX_mask[col_xyz.index[r]] += 1;
}
}
}
}
}
freeViewXYInfoB(&view_xy_info, geom_info);
}
}
}
freeViewXYZInfo(&view_xyz_info);
freeViewXYInfo(&view_xy_info, geom_info);
freeACol(&col_xyz);
}
void serialForwardProject(ENTRY *AX, ENTRY *X, struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
int i, jx, jy, jz, r, Nyz;
float x, y, z;
struct SourceLocInfo source_loc_info;
struct ViewXYInfo view_xy_info;
struct ViewXYZInfo view_xyz_info;
struct ACol col_xyz;
/* initialize projection */
for (i = 0; i < (geom_info->Nr)*(geom_info->Nc)*(geom_info->Nv); i++)
{
AX[i] = 0.0;
}
/* allocate precomputing structures */
createACol(&col_xyz, COL_LEN); /* TODO COL_LEN hard-coded */
createSourceLocInfo(&source_loc_info, geom_info);
compSourceLocInfo(&source_loc_info, geom_info);
createViewXYInfo(&view_xy_info, geom_info);
createViewXYZInfo(&view_xyz_info, geom_info);
Nyz = img_info->Ny*img_info->Nz;
/* for each voxel in the image */
fprintf(stdout, "\nforward projecting (serial version) ...\n");
for (jx = 0; jx < img_info->Nx; jx++)
{
x = img_info->x0 + jx*img_info->Del_xy;
for (jy = 0; jy < img_info->Ny; jy++)
{
y = img_info->y0 + jy*img_info->Del_xy;
compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, &source_loc_info);
for (jz = 0; jz < img_info->Nz; jz++)
{
z = img_info->z0 + jz*img_info->Del_z;
compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, &source_loc_info, &view_xy_info);
compAColxyzOnFly(x, y, z, geom_info, &source_loc_info, &view_xy_info, &view_xyz_info, &col_xyz);
for (r = 0; r < col_xyz.n_index; r++)
{
AX[col_xyz.index[r]] += col_xyz.val[r]*X[jx*Nyz+jy*img_info->Nz+jz];
}
}
freeViewXYInfoB(&view_xy_info, geom_info);
}
}
fprintf(stdout, "\nfinish forward projection!\n");
freeViewXYZInfo(&view_xyz_info);
freeViewXYInfo(&view_xy_info, geom_info);
freeSourceLocInfo(&source_loc_info);
freeACol(&col_xyz);
}
void backProject(ENTRY *AX, ENTRY *X, struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
int i, t, len;
struct SourceLocInfo source_loc_info;
//pthread_t thread[NUM_CORE];
//struct paraForwardProjectData thread_data[NUM_CORE];
/* initialize projection */
len=img_info->Nx * img_info->Ny * img_info->Nz;
for (i = 0; i < len; i++)
X[i] = 0.0;
/* allocate precomputing structures */
createSourceLocInfo(&source_loc_info, geom_info);
compSourceLocInfo(&source_loc_info, geom_info);
/* for each voxel in the image */
fprintf(stdout, "\nback projecting (parallelized version) ...\n");
/*
for (t = 0; t < NUM_CORE; t++)
{
thread_data[t].tid = t;
thread_data[t].geom_info = geom_info;
thread_data[t].img_info = img_info;
thread_data[t].source_loc_info = &source_loc_info;
thread_data[t].X = X;
thread_data[t].AX = AX;
pthread_create(&thread[t], NULL, paraBackProject, (void *)&thread_data[t]);
}
for (t = 0; t < NUM_CORE; t++)
{
pthread_join(thread[t], NULL);
}
*/
#pragma omp parallel
{
/* spawn the team so the per-thread jz split inside paraBackProject takes effect, mirroring forwardProject */
paraBackProject(geom_info,img_info,&source_loc_info,X,AX);
}
freeSourceLocInfo(&source_loc_info);
}
void *paraBackProject(struct GeomInfo *geom_info,struct ImgInfo *img_info,struct SourceLocInfo *source_loc_info,ENTRY *X,ENTRY *AX)
{
int tid = omp_get_thread_num();
//struct GeomInfo *geom_info;
//struct ImgInfo *img_info;
//struct SourceLocInfo *source_loc_info;
//ENTRY *X;
//ENTRY *AX;
//struct paraForwardProjectData *data;
int indx,offset1,offset2; /* sjk */
int jx, jy, jz, jzmax, Nyz, r;
float x, y, z;
struct ACol col_xyz;
struct ViewXYInfo view_xy_info;
struct ViewXYZInfo view_xyz_info;
float sum;
//data = (struct paraForwardProjectData *)input;
//tid = data->tid;
//geom_info = data->geom_info;
//img_info = data->img_info;
//source_loc_info = data->source_loc_info;
//X = data->X;
//AX = data->AX;
createACol(&col_xyz, COL_LEN); /* TODO COL_LEN hard-coded */
createViewXYInfo(&view_xy_info, geom_info);
createViewXYZInfo(&view_xyz_info, geom_info);
jzmax = (tid+1)*img_info->Nz/omp_get_num_threads();
if (tid == (omp_get_num_threads()-1))
{
jzmax = img_info->Nz;
}
Nyz = img_info->Ny*img_info->Nz;
for (jx = 0; jx < img_info->Nx; jx++)
{
x = img_info->x0 + jx*img_info->Del_xy;
offset1=jx*Nyz; /* sjk */
for (jy = 0; jy < img_info->Ny; jy++)
{
y = img_info->y0 + jy*img_info->Del_xy;
compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, source_loc_info);
offset2=offset1+jy*img_info->Nz; /* sjk */
for (jz = tid*img_info->Nz/omp_get_num_threads(); jz < jzmax; jz++)
{
indx=offset2+jz; /* sjk */
z = img_info->z0 + jz*img_info->Del_z;
compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, source_loc_info, &view_xy_info);
compAColxyzOnFly(x, y, z, geom_info, source_loc_info, &view_xy_info, &view_xyz_info, &col_xyz);
sum=0;
for (r = 0; r < col_xyz.n_index; r++)
{
sum += AX[col_xyz.index[r]] * col_xyz.val[r];
/*AX[col_xyz.index[r]] += col_xyz.val[r]*X[indx]; */
}
X[indx]= (col_xyz.n_index > 0) ? sum/col_xyz.n_index : 0.0; /* divide by number of non-zero terms (skip empty columns) */
}
freeViewXYInfoB(&view_xy_info, geom_info);
}
}
freeViewXYZInfo(&view_xyz_info);
freeViewXYInfo(&view_xy_info, geom_info);
freeACol(&col_xyz);
return 0;
}
|
stream.c
|
/*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 80000000
#endif
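/* With the default above (80,000,000 elements of 8-byte STREAM_TYPE), each array
 * takes 80e6 * 8 B = 640 MB (~610 MiB) and the three arrays need roughly 1.8 GiB
 * in total, which satisfies rule (1a) above for last-level caches up to 160 MB. */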
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 100
#endif
#endif
#ifndef NTIMES
# define NTIMES 100
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to [email protected]
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
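/* read the time-stamp counter via RDTSCP and fold EDX:EAX into a single 64-bit
 * value; used below only for the rough "cycles per 1 second sleep" report
 * (the per-kernel cycle report is commented out) */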
static inline unsigned long long cycles()
{
unsigned long long u;
//asm volatile ("rdtsc;shlq $32,%%rdx;orq %%rdx,%%rax":"=a"(u)::"%rdx");
asm volatile ("rdtscp;shlq $32,%%rdx;orq %%rdx,%%rax;movq %%rax,%0":"=q"(u)::"%rax", "%rdx", "rcx");
return u;
}
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
unsigned long long c0 = 0;
unsigned long long c1 = 0;
c0 = cycles();
sleep(1);
c1 = cycles();
printf("1 second sleep, number of cycles = %llu\n", c1 - c0);
scalar = 3.0;
c0 = cycles();
//printf("c0 = %ld\n", c0);
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
__asm__ volatile("# Triad begins");
tuned_STREAM_Triad(scalar);
__asm__ volatile("# Triad ends");
//printf("%ld\n", c0);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
c1 = cycles();
//printf("c1 = %ld\n", c1);
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
//printf("cycles : %lf\n", (double) (c1 - c0)/NTIMES/STREAM_ARRAY_SIZE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
inline
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
GB_unop__log_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log_fp32_fp32
// op(A') function: GB_unop_tran__log_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = logf (aij)
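// note: logf follows C99 math semantics, so aij == 0 yields -inf and aij < 0
// yields NaN in Cx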
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = logf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = logf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = logf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = logf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
8581.c
|
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
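// the schedule below tiles each spatial update into 128 x 32 blocks (outer strides
// of 128 over rows and 32 over columns) and parallelizes over the row tiles with
// OpenMP; the time loop t2 stays serial since each step depends on the previous one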
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
int t10;
int t8;
int t6;
int t4;
int t2;
for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
for (t4 = 0; t4 <= ny - 1; t4 += 1)
ey[0][t4] = _fict_[t2];
#pragma omp parallel for private(t6, t8, t10) /* t6/t8/t10 are function-scope, so privatize them to avoid a data race */
for (t4 = 1; t4 <= nx - 1; t4 += 128)
for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1)
for (t8 = 0; t8 <= ny - 1; t8 += 32)
for (t10 = t8; t10 <= (ny - 1 < t8 + 31 ? ny - 1 : t8 + 31); t10 += 1)
ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
#pragma omp parallel for private(t6, t8, t10)
for (t4 = 0; t4 <= nx - 1; t4 += 128)
for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1)
for (t8 = 1; t8 <= ny - 1; t8 += 32)
for (t10 = t8; t10 <= (ny - 1 < t8 + 31 ? ny - 1 : t8 + 31); t10 += 1)
ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
#pragma omp parallel for private(t6, t8, t10)
for (t4 = 0; t4 <= nx - 2; t4 += 128)
for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1)
for (t8 = 0; t8 <= ny - 2; t8 += 32)
for (t10 = t8; t10 <= (ny - 2 < t8 + 31 ? ny - 2 : t8 + 31); t10 += 1)
hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
}
}
|
opencl_pgpwde_fmt_plug.c
|
/*
* Format for brute-forcing PGP WDE disk images.
*
* This software is Copyright (c) 2017 Dhiru Kholia <dhiru at openwall.net> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_pgpwde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_pgpwde);
#else
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "aes.h"
#include "sha.h"
#include "common-opencl.h"
#include "options.h"
#include "pgpwde_common.h"
#define FORMAT_LABEL "pgpwde-opencl"
#define ALGORITHM_NAME "SHA1 OpenCL"
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define PLAINTEXT_LENGTH 124
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} pgpwde_password;
typedef struct {
uint8_t v[32];
} pgpwde_hash;
typedef struct {
uint32_t saltlen;
uint32_t bytes;
uint32_t key_len;
uint8_t salt[16];
} pgpwde_salt;
static int *cracked;
static int any_cracked;
static struct custom_salt *cur_salt;
static cl_int cl_error;
static pgpwde_password *inbuffer;
static pgpwde_hash *outbuffer;
static pgpwde_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"
static const char *warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(pgpwde_password) * gws;
outsize = sizeof(pgpwde_hash) * gws;
settingsize = sizeof(pgpwde_salt);
cracked_size = sizeof(*cracked) * gws;
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
cracked = mem_calloc(1, cracked_size);
// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(cracked);
}
}
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DPLAINTEXT_LENGTH=%d",
PLAINTEXT_LENGTH);
opencl_init("$JOHN/kernels/pgpwde_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "pgpwde", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
create_clobj, release_clobj,
sizeof(pgpwde_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 300);
}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
currentsalt.bytes = cur_salt->bytes;
/* NOTE saltlen and key_len are currently hard-coded in kernel, for speed */
currentsalt.saltlen = 16;
currentsalt.key_len = 32;
memcpy((char*)currentsalt.salt, cur_salt->salt, currentsalt.saltlen);
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
"Copy setting to gpu");
}
#undef set_key
static void set_key(char *key, int index)
{
uint32_t length = strlen(key);
if (length > PLAINTEXT_LENGTH)
length = PLAINTEXT_LENGTH;
inbuffer[index].length = length;
memcpy(inbuffer[index].v, key, length);
}
static char *get_key(int index)
{
static char ret[PLAINTEXT_LENGTH + 1];
uint32_t length = inbuffer[index].length;
memcpy(ret, inbuffer[index].v, length);
ret[length] = '\0';
return ret;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
size_t *lws = local_work_size ? &local_work_size : NULL;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]),
"Run kernel");
// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
"Copy result back");
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char key[40];
int ret = -1;
memcpy(key, outbuffer[index].v, 32);
ret = pgpwde_decrypt_and_verify(key, cur_salt->esk, 128);
cracked[index] = (0 == ret);
if (ret == 0) {
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_opencl_pgpwde = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
pgpwde_tests,
},
{
init,
done,
reset,
fmt_default_prepare,
pgpwde_valid,
fmt_default_split,
fmt_default_binary,
pgpwde_get_salt,
{
0
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
GB_binop__pow_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int64)
// C=scalar+B GB (_bind1st__pow_int64)
// C=scalar+B' GB (_bind1st_tran__pow_int64)
// C=A+scalar GB (_bind2nd__pow_int64)
// C=A'+scalar GB (_bind2nd_tran__pow_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_pow_int64 (aij, bij)
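//------------------------------------------------------------------------------
// Editor's note: GB_pow_int64 itself is defined elsewhere in the library; the
// sketch below is only an illustration of how a signed 64-bit integer power
// could be computed by repeated squaring. It is hypothetical code, not the
// GraphBLAS definition: the real operator also fixes conventions for negative
// exponents and overflow that this sketch only handles crudely.
//------------------------------------------------------------------------------
#if 0
static inline int64_t example_pow_int64 (int64_t x, int64_t y)
{
    if (y < 0)
    {
        // crude convention for negative exponents: only |x| == 1 is nonzero
        if (x == 1) return (1) ;
        if (x == -1) return ((y % 2 == 0) ? 1 : -1) ;
        return (0) ;
    }
    int64_t z = 1 ;
    while (y > 0)
    {
        if (y & 1) z *= x ;     // fold in the current bit of the exponent
        y >>= 1 ;
        if (y > 0) x *= x ;     // square the base for the next bit
    }
    return (z) ;
}
#endif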
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_int64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT64 || GxB_NO_POW_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pow_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pow_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__pow_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_int64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_int64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int64 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int64 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lock-nested-unrelated.c
|
/*
* lock-nested-unrelated.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
int var = 0;
omp_nest_lock_t lock;
omp_init_nest_lock(&lock);
#pragma omp parallel num_threads(2) shared(var)
{
omp_set_nest_lock(&lock);
omp_set_nest_lock(&lock);
// Dummy locking.
omp_unset_nest_lock(&lock);
omp_unset_nest_lock(&lock);
var++;
}
omp_destroy_nest_lock(&lock);
fprintf(stderr, "DONE\n");
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}lock-nested-unrelated.c:33
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}lock-nested-unrelated.c:33
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
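/*
 * Editor's sketch (not part of the Archer test suite): the nested lock above is
 * "unrelated" because var++ sits outside the locked region, which is what makes
 * the reported race expected. For contrast, a hypothetical race-free variant
 * guards the shared increment with the same nested lock:
 */
#if 0
#include <omp.h>
#include <stdio.h>
int main(void) {
  int var = 0;
  omp_nest_lock_t lock;
  omp_init_nest_lock(&lock);
#pragma omp parallel num_threads(2) shared(var)
  {
    omp_set_nest_lock(&lock);
    omp_set_nest_lock(&lock);
    var++; // shared access now performed while holding the lock
    omp_unset_nest_lock(&lock);
    omp_unset_nest_lock(&lock);
  }
  omp_destroy_nest_lock(&lock);
  printf("var = %d\n", var); // expected: 2, with no race report
  return 0;
}
#endif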
|
builder.h
|
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUILDER_H_
#define BUILDER_H_
#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>
#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: BuilderBase
Author: Scott Beamer
Given arguments from the command line (cli), returns a built graph
- MakeGraph() will parse cli and obtain edgelist and call
MakeGraphFromEL(edgelist) to perform actual graph construction
- edgelist can be from file (reader) or synthetically generated (generator)
- Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/
template <typename NodeID_, typename DestID_ = NodeID_,
typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
typedef EdgePair<NodeID_, DestID_> Edge;
typedef pvector<Edge> EdgeList;
const CLBase &cli_;
bool symmetrize_;
bool needs_weights_;
int64_t num_nodes_ = -1;
int64_t num_edges_ = 0;
int64_t base_graph_num_edges_ = 0;
public:
explicit BuilderBase(const CLBase &cli) : cli_(cli) {
symmetrize_ = cli_.symmetrize();
needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
}
DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
return e.u;
}
DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
}
NodeID_ FindMaxNodeID(const EdgeList &el) {
NodeID_ max_seen = 0;
#pragma omp parallel for reduction(max : max_seen)
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
max_seen = std::max(max_seen, e.u);
max_seen = std::max(max_seen, (NodeID_) e.v);
}
return max_seen;
}
// pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
// pvector<NodeID_> degrees(num_nodes_, 0);
// #pragma omp parallel for
// for (auto it = el.begin(); it < el.end(); it++) {
// Edge e = *it;
// if (symmetrize_ || (!symmetrize_ && !transpose))
// fetch_and_add(degrees[e.u], 1);
// if (symmetrize_ || (!symmetrize_ && transpose))
// fetch_and_add(degrees[(NodeID_) e.v], 1);
// }
// return degrees;
// }
//
// static
// pvector<SGOffset> PrefixSum(const pvector<NodeID_> °rees) {
// pvector<SGOffset> sums(degrees.size() + 1);
// SGOffset total = 0;
// for (size_t n=0; n < degrees.size(); n++) {
// sums[n] = total;
// total += degrees[n];
// }
// sums[degrees.size()] = total;
// return sums;
// }
//
// static
// pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> °rees) {
// const size_t block_size = 1<<20;
// const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
// pvector<SGOffset> local_sums(num_blocks);
// #pragma omp parallel for
// for (size_t block=0; block < num_blocks; block++) {
// SGOffset lsum = 0;
// size_t block_end = std::min((block + 1) * block_size, degrees.size());
// for (size_t i=block * block_size; i < block_end; i++)
// lsum += degrees[i];
// local_sums[block] = lsum;
// }
// pvector<SGOffset> bulk_prefix(num_blocks+1);
// SGOffset total = 0;
// for (size_t block=0; block < num_blocks; block++) {
// bulk_prefix[block] = total;
// total += local_sums[block];
// }
// bulk_prefix[num_blocks] = total;
// pvector<SGOffset> prefix(degrees.size() + 1);
// #pragma omp parallel for
// for (size_t block=0; block < num_blocks; block++) {
// SGOffset local_total = bulk_prefix[block];
// size_t block_end = std::min((block + 1) * block_size, degrees.size());
// for (size_t i=block * block_size; i < block_end; i++) {
// prefix[i] = local_total;
// local_total += degrees[i];
// }
// }
// prefix[degrees.size()] = bulk_prefix[num_blocks];
// return prefix;
// }
//
// // Removes self-loops and redundant edges
// // Side effect: neighbor IDs will be sorted
// void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
// DestID_*** sq_index, DestID_** sq_neighs) {
// pvector<NodeID_> diffs(g.num_nodes());
// DestID_ *n_start, *n_end;
// #pragma omp parallel for private(n_start, n_end)
// for (NodeID_ n=0; n < g.num_nodes(); n++) {
// if (transpose) {
// n_start = g.in_neigh(n).begin();
// n_end = g.in_neigh(n).end();
// } else {
// n_start = g.out_neigh(n).begin();
// n_end = g.out_neigh(n).end();
// }
// std::sort(n_start, n_end);
// DestID_ *new_end = std::unique(n_start, n_end);
// new_end = std::remove(n_start, new_end, n);
// diffs[n] = new_end - n_start;
// }
// pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
// *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
// *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
// #pragma omp parallel for private(n_start)
// for (NodeID_ n=0; n < g.num_nodes(); n++) {
// if (transpose)
// n_start = g.in_neigh(n).begin();
// else
// n_start = g.out_neigh(n).begin();
// std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
// }
// }
//
// CSRGraph<NodeID_, DestID_, invert> SquishGraph(
// const CSRGraph<NodeID_, DestID_, invert> &g) {
// DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
// SquishCSR(g, false, &out_index, &out_neighs);
// if (g.directed()) {
// if (invert)
// SquishCSR(g, true, &in_index, &in_neighs);
// return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
// out_neighs, in_index,
// in_neighs);
// } else {
// return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
// out_neighs);
// }
// }
//
// /*
// Graph Building Steps (for CSR):
// - Read edgelist once to determine vertex degrees (CountDegrees)
// - Determine vertex offsets by a prefix sum (ParallelPrefixSum)
// - Allocate storage and set points according to offsets (GenIndex)
// - Copy edges into storage
// */
// void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
// DestID_** neighs) {
// pvector<NodeID_> degrees = CountDegrees(el, transpose);
// pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
// *neighs = new DestID_[offsets[num_nodes_]];
// *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
// #pragma omp parallel for
// for (auto it = el.begin(); it < el.end(); it++) {
// Edge e = *it;
// if (symmetrize_ || (!symmetrize_ && !transpose))
// (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
// if (symmetrize_ || (!symmetrize_ && transpose))
// (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
// GetSource(e);
// }
// }
//
// CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
// DestID_ **index = nullptr, **inv_index = nullptr;
// DestID_ *neighs = nullptr, *inv_neighs = nullptr;
// Timer t;
// t.Start();
// if (num_nodes_ == -1)
// num_nodes_ = FindMaxNodeID(el)+1;
// if (needs_weights_)
// Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
// MakeCSR(el, false, &index, &neighs);
// if (!symmetrize_ && invert)
// MakeCSR(el, true, &inv_index, &inv_neighs);
// t.Stop();
// PrintTime("Build Time", t.Seconds());
// if (symmetrize_)
// return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
// else
// return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
// inv_index, inv_neighs);
// }
CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
EdgeList el;
Timer t;
if (cli_.base_filename() != "") {
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.base_filename());
el = r.ReadFile(needs_weights_);
}
else {
printf("[%s]: graph input-file not exists, abort!!!\n", __FUNCTION__);
exit(0);
}
base_graph_num_edges_ = el.size();
num_nodes_ = FindMaxNodeID(el) + 1;
if(symmetrize_) {
for(int i=0; i<base_graph_num_edges_; i+=1) {
el.push_back(EdgePair<NodeID_, DestID_>(static_cast<NodeID_>(el[i].v), GetSource(el[i])));
}
base_graph_num_edges_ *= 2;
}
// std::sort(el.begin(), el.end(), [](Edge &a, Edge &b) {
// if(a.u != b.u) return a.u < b.u;
// return (a.v < b.v);
// });
if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
CSRGraph<NodeID_, DestID_, invert> g(el, !symmetrize_, base_graph_num_edges_, num_nodes_);
el.clear();
if (cli_.dynamic_filename() != "") {
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.dynamic_filename());
el = r.ReadFile(needs_weights_);
}
else {
printf("[%s]: graph input-file not exists, abort!!!\n", __FUNCTION__);
exit(0);
}
if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
size_t dynamic_edges = el.size();
t.Start();
for(uint32_t i=0; i<dynamic_edges; i+=1) {
g.insert(el[i].u, el[i].v.v, el[i].v.w);
if(symmetrize_) {
g.insert(el[i].v.v, el[i].u, el[i].v.w);
}
// if(i && i % 10000000 == 0) cout << "inserted " << (i/1000000) << "M dynamic edges" << endl;
}
t.Stop();
cout << "D-Graph Build Time: " << t.Seconds() << " seconds." << endl;
return g;
}
// Relabels (and rebuilds) graph by order of decreasing degree
static
CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
const CSRGraph<NodeID_, DestID_, invert> &g) {
if (g.directed()) {
std::cout << "Cannot relabel directed graph" << std::endl;
std::exit(-11);
}
Timer t;
t.Start();
typedef std::pair<int64_t, NodeID_> degree_node_p;
pvector<degree_node_p> degree_id_pairs(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++)
degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
std::greater<degree_node_p>());
pvector<NodeID_> degrees(g.num_nodes());
pvector<NodeID_> new_ids(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[n] = degree_id_pairs[n].first;
new_ids[degree_id_pairs[n].second] = n;
}
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for
for (NodeID_ u=0; u < g.num_nodes(); u++) {
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
std::sort(index[new_ids[u]], index[new_ids[u]+1]);
}
t.Stop();
PrintTime("Relabel", t.Seconds());
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
};
#endif // BUILDER_H_
|
resize.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/magick.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
struct _ResizeFilter
{
MagickRealType
(*filter)(const MagickRealType,const ResizeFilter *),
(*window)(const MagickRealType,const ResizeFilter *),
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension scaling to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
coefficient[7]; /* cubic coefficients for BC-cubic filters */
ResizeWeightingFunctionType
filterWeightingType,
windowWeightingType;
size_t
signature;
};
/*
Forward declarations.
*/
static MagickRealType
I0(MagickRealType x),
BesselOrderOne(MagickRealType),
Sinc(const MagickRealType, const ResizeFilter *),
SincFast(const MagickRealType, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealType FilterName(const MagickRealType x,
% const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this is a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static MagickRealType Blackman(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Blackman: 2nd order cosine windowing function:
0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)
Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
five flops.
*/
const MagickRealType cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.34+cosine*(0.5+cosine*0.16));
}
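/*
  Editor's note: the single-trig form above follows from the double-angle
  identity cos(2 pi x) = 2 cos(pi x)^2 - 1, so
    0.42 + 0.5 cos(pi x) + 0.08 cos(2 pi x)
      = 0.34 + cos(pi x)*(0.5 + 0.16 cos(pi x)).
  A minimal standalone check of that identity (illustrative sketch only, not
  part of MagickCore):
*/
#if 0
#include <math.h>
#include <stdio.h>
int main(void)
{
  const double pi = 3.14159265358979323846;
  double max_diff = 0.0;
  for (double x = 0.0; x <= 1.0; x += 1.0/1024.0)
  {
    const double two_trig = 0.42+0.5*cos(pi*x)+0.08*cos(2.0*pi*x);
    const double c = cos(pi*x);
    const double one_trig = 0.34+c*(0.5+c*0.16);
    const double diff = fabs(two_trig-one_trig);
    if (diff > max_diff) max_diff = diff;
  }
  printf("max |difference| = %.3e\n", max_diff);  /* rounding noise only */
  return 0;
}
#endif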
static MagickRealType Bohman(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Bohman: 2nd order cosine windowing function:
(1-x) cos(pi x) + sin(pi x) / pi.
Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops,
taking advantage of the fact that the support of Bohman is 1.0 (so that we
know that sin(pi x) >= 0).
*/
const double cosine=cos((double) (MagickPI*x));
const double sine=sqrt(1.0-cosine*cosine);
magick_unreferenced(resize_filter);
return((MagickRealType) ((1.0-x)*cosine+(1.0/MagickPI)*sine));
}
static MagickRealType Box(const MagickRealType magick_unused(x),
const ResizeFilter *magick_unused(resize_filter))
{
/*
A Box filter is an equal weighting function (all weights equal).
DO NOT LIMIT results by support, or resize point sampling will not work,
as it requests points beyond its normal 0.0 support size.
*/
magick_unreferenced(x);
magick_unreferenced(resize_filter);
return(1.0);
}
static MagickRealType Cosine(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
cos((pi/2)*x).
*/
magick_unreferenced(resize_filter);
return((MagickRealType)cos((double) (MagickPI2*x)));
}
static MagickRealType CubicBC(const MagickRealType x,
const ResizeFilter *resize_filter)
{
/*
Cubic Filters using B,C determined values:
Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter
Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears
Spline B = 1 C = 0 B-Spline Gaussian approximation
Hermite B = 0 C = 0 B-Spline interpolator
See the paper by Mitchell and Netravali, "Reconstruction Filters in Computer
Graphics", Computer Graphics, Volume 22, Number 4, August 1988
http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
Mitchell.pdf.
Coefficients are determined from B,C values:
P0 = ( 6 - 2*B )/6 = coeff[0]
P1 = 0
P2 = (-18 +12*B + 6*C )/6 = coeff[1]
P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
Q0 = ( 8*B +24*C )/6 = coeff[3]
Q1 = ( -12*B -48*C )/6 = coeff[4]
Q2 = ( 6*B +30*C )/6 = coeff[5]
Q3 = ( - 1*B - 6*C )/6 = coeff[6]
which are used to define the filter:
P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1
Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2
which ensures function is continuous in value and derivative (slope).
*/
if (x < 1.0)
return(resize_filter->coefficient[0]+x*(x*
(resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
if (x < 2.0)
return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
(resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
return(0.0);
}
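/*
  Editor's sketch (not MagickCore's actual setup code, which lives in the
  filter acquisition logic): how the seven coefficients above could be
  precomputed from B and C using the P0..P3 / Q0..Q3 formulas in the comment,
  shown here for Mitchell-Netravali B = C = 1/3.
*/
#if 0
#include <stdio.h>
int main(void)
{
  const double B = 1.0/3.0, C = 1.0/3.0;
  double coefficient[7];
  coefficient[0] = (6.0-2.0*B)/6.0;            /* P0 */
  coefficient[1] = (-18.0+12.0*B+6.0*C)/6.0;   /* P2 (P1 is always 0) */
  coefficient[2] = (12.0-9.0*B-6.0*C)/6.0;     /* P3 */
  coefficient[3] = (8.0*B+24.0*C)/6.0;         /* Q0 */
  coefficient[4] = (-12.0*B-48.0*C)/6.0;       /* Q1 */
  coefficient[5] = (6.0*B+30.0*C)/6.0;         /* Q2 */
  coefficient[6] = (-1.0*B-6.0*C)/6.0;         /* Q3 */
  /* evaluate the piecewise cubic exactly as CubicBC() does */
  for (double x = 0.0; x < 2.0; x += 0.5)
  {
    const double w = (x < 1.0) ?
      coefficient[0]+x*(x*(coefficient[1]+x*coefficient[2])) :
      coefficient[3]+x*(coefficient[4]+x*(coefficient[5]+x*coefficient[6]));
    printf("w(%.1f) = %+.6f\n", x, w);
  }
  return 0;
}
#endif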
static MagickRealType Gaussian(const MagickRealType x,
const ResizeFilter *resize_filter)
{
/*
Gaussian with a sigma = 1/2 (or as user specified)
Gaussian Formula (1D) ...
exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))
Gaussian Formula (2D) ...
exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
or for radius
exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
Note that the only change from the 1-D to the radial form is in the
normalization multiplier, which is not needed or used when the Gaussian is
used as a filter.
The constants are pre-calculated...
coeff[0]=sigma;
coeff[1]=1.0/(2.0*sigma^2);
coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
exp( -coeff[1]*(x^2)) ) * coeff[2];
However the multiplier coeff[1] is needed; the others are informative only.
This separates the gaussian 'sigma' value from the 'blur/support'
settings allowing for its use in special 'small sigma' gaussians,
without the filter 'missing' pixels because the support becomes too
small.
*/
return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
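/*
  Editor's sketch (illustrative only; the real values are filled in by the
  filter setup code from the "filter:sigma" setting): with the default
  sigma = 1/2, the only coefficient the evaluation above actually needs is
  coefficient[1] = 1/(2*sigma^2) = 2.0, so Gaussian(x) reduces to exp(-2*x*x).
*/
#if 0
#include <math.h>
#include <stdio.h>
int main(void)
{
  const double sigma = 0.5;                    /* default sigma */
  const double coeff1 = 1.0/(2.0*sigma*sigma); /* the multiplier used in exp() */
  for (double x = 0.0; x <= 1.5; x += 0.5)
    printf("Gaussian(%.1f) = %.6f\n", x, exp(-coeff1*x*x));
  return 0;
}
#endif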
static MagickRealType Hanning(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
0.5+0.5*cos(pi*x).
*/
const MagickRealType cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.5+0.5*cosine);
}
static MagickRealType Hamming(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Offset cosine window function:
.54 + .46 cos(pi x).
*/
const MagickRealType cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.54+0.46*cosine);
}
static MagickRealType Jinc(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
http://mathworld.wolfram.com/JincFunction.html and page 11 of
http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
The original "zoom" program by Paul Heckbert called this "Bessel". But
really it is more accurately named "Jinc".
*/
magick_unreferenced(resize_filter);
if (x == 0.0)
return((MagickRealType) (0.5*MagickPI));
return(BesselOrderOne((MagickRealType) MagickPI*x)/x);
}
static MagickRealType Kaiser(const MagickRealType x,
const ResizeFilter *resize_filter)
{
/*
Kaiser Windowing Function (bessel windowing)
I0( beta * sqrt( 1-x^2) ) / IO(0)
Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
However it is typically defined in terms of Alpha*PI
The normalization factor (coeff[1]) is not actually needed,
but without it the filter has a large value at x=0, making it
difficult to compare the function with other windowing functions.
*/
return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
sqrt((double) (1.0-x*x))));
}
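/*
  Editor's sketch (not MagickCore code): the comment above boils down to the
  classic Kaiser window w(x) = I0(beta*sqrt(1-x^2)) / I0(beta). The modified
  Bessel function I0 can be evaluated with its power series
    I0(t) = sum_{k>=0} ((t*t/4)^k) / (k!)^2,
  which is what the minimal check below does for the default beta = 6.5; the
  normalization 1/I0(beta) plays the role the comment ascribes to coeff[1].
*/
#if 0
#include <math.h>
#include <stdio.h>
static double bessel_i0(double t)
{
  double term = 1.0, sum = 1.0;
  for (int k = 1; k < 64; k++)
  {
    term *= (t*t)/(4.0*k*k);   /* ratio of consecutive series terms */
    sum += term;
    if (term < 1.0e-16*sum) break;
  }
  return sum;
}
int main(void)
{
  const double beta = 6.5;                 /* default noted in the comment */
  const double norm = 1.0/bessel_i0(beta); /* normalization factor */
  for (double x = 0.0; x <= 1.0; x += 0.25)
    printf("Kaiser(%.2f) = %.6f\n", x, norm*bessel_i0(beta*sqrt(1.0-x*x)));
  return 0;
}
#endif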
static MagickRealType Lagrange(const MagickRealType x,
const ResizeFilter *resize_filter)
{
MagickRealType
value;
register ssize_t
i;
ssize_t
n,
order;
/*
Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange
function and depends on the overall support window size of the filter. That
is: for a support of 2, it gives a lagrange-4 (piecewise cubic function).
"n" identifies the piece of the piecewise polynomial.
See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
*/
if (x > resize_filter->support)
return(0.0);
order=(ssize_t) (2.0*resize_filter->window_support); /* number of pieces */
n=(ssize_t) (resize_filter->window_support+x);
value=1.0f;
for (i=0; i < order; i++)
if (i != n)
value*=(n-i-x)/(n-i);
return(value);
}
static MagickRealType Quadratic(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
2nd order (quadratic) B-Spline approximation of Gaussian.
*/
magick_unreferenced(resize_filter);
if (x < 0.5)
return(0.75-x*x);
if (x < 1.5)
return(0.5*(x-1.5)*(x-1.5));
return(0.0);
}
static MagickRealType Sinc(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Scaled sinc(x) function using a trig call:
sinc(x) == sin(pi x)/(pi x).
*/
magick_unreferenced(resize_filter);
if (x != 0.0)
{
const MagickRealType alpha=(MagickRealType) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
return((MagickRealType) 1.0);
}
static MagickRealType SincFast(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Approximations of the sinc function sin(pi x)/(pi x) over the interval
[-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
from the Natural Sciences and Engineering Research Council of Canada.
Although the approximations are polynomials (for low order of
approximation) and quotients of polynomials (for higher order of
approximation) and consequently are similar in form to Taylor polynomials /
Pade approximants, the approximations are computed with a completely
different technique.
Summary: These approximations are "the best" in terms of bang (accuracy)
for the buck (flops). More specifically: Among the polynomial quotients
that can be computed using a fixed number of flops (with a given "+ - * /
budget"), the chosen polynomial quotient is the one closest to the
approximated function with respect to maximum absolute relative error over
the given interval.
The Remez algorithm, as implemented in the boost library's minimax package,
is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html
If outside of the interval of approximation, use the standard trig formula.
*/
magick_unreferenced(resize_filter);
if (x > 4.0)
{
const MagickRealType alpha=(MagickRealType) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
{
/*
The approximations only depend on x^2 (sinc is an even function).
*/
const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
/*
Maximum absolute relative error 6.3e-6 < 1/2^17.
*/
const double c0 = 0.173610016489197553621906385078711564924e-2L;
const double c1 = -0.384186115075660162081071290162149315834e-3L;
const double c2 = 0.393684603287860108352720146121813443561e-4L;
const double c3 = -0.248947210682259168029030370205389323899e-5L;
const double c4 = 0.107791837839662283066379987646635416692e-6L;
const double c5 = -0.324874073895735800961260474028013982211e-8L;
const double c6 = 0.628155216606695311524920882748052490116e-10L;
const double c7 = -0.586110644039348333520104379959307242711e-12L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
/*
Max. abs. rel. error 2.2e-8 < 1/2^25.
*/
const double c0 = 0.173611107357320220183368594093166520811e-2L;
const double c1 = -0.384240921114946632192116762889211361285e-3L;
const double c2 = 0.394201182359318128221229891724947048771e-4L;
const double c3 = -0.250963301609117217660068889165550534856e-5L;
const double c4 = 0.111902032818095784414237782071368805120e-6L;
const double c5 = -0.372895101408779549368465614321137048875e-8L;
const double c6 = 0.957694196677572570319816780188718518330e-10L;
const double c7 = -0.187208577776590710853865174371617338991e-11L;
const double c8 = 0.253524321426864752676094495396308636823e-13L;
const double c9 = -0.177084805010701112639035485248501049364e-15L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
/*
Max. abs. rel. error 1.2e-12 < 1/2^39.
*/
const double c0 = 0.173611111110910715186413700076827593074e-2L;
const double c1 = -0.289105544717893415815859968653611245425e-3L;
const double c2 = 0.206952161241815727624413291940849294025e-4L;
const double c3 = -0.834446180169727178193268528095341741698e-6L;
const double c4 = 0.207010104171026718629622453275917944941e-7L;
const double c5 = -0.319724784938507108101517564300855542655e-9L;
const double c6 = 0.288101675249103266147006509214934493930e-11L;
const double c7 = -0.118218971804934245819960233886876537953e-13L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
const double d0 = 1.0L;
const double d1 = 0.547981619622284827495856984100563583948e-1L;
const double d2 = 0.134226268835357312626304688047086921806e-2L;
const double d3 = 0.178994697503371051002463656833597608689e-4L;
const double d4 = 0.114633394140438168641246022557689759090e-6L;
const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
return((MagickRealType) ((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p));
#endif
}
}
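/*
  Editor's sketch (not MagickCore code): the "maximum absolute relative error"
  quoted above can be checked empirically. The harness below copies the
  MAGICKCORE_QUANTUM_DEPTH <= 8 coefficients verbatim and compares the
  polynomial form against the exact sin(pi x)/(pi x) on a midpoint grid over
  (0,4) that avoids the shared zeros at x = 1, 2, 3, 4; the printed maximum
  should be on the order of the 6.3e-6 bound claimed in the code.
*/
#if 0
#include <math.h>
#include <stdio.h>
int main(void)
{
  const double pi = 3.14159265358979323846;
  const double c0 =  0.173610016489197553621906385078711564924e-2;
  const double c1 = -0.384186115075660162081071290162149315834e-3;
  const double c2 =  0.393684603287860108352720146121813443561e-4;
  const double c3 = -0.248947210682259168029030370205389323899e-5;
  const double c4 =  0.107791837839662283066379987646635416692e-6;
  const double c5 = -0.324874073895735800961260474028013982211e-8;
  const double c6 =  0.628155216606695311524920882748052490116e-10;
  const double c7 = -0.586110644039348333520104379959307242711e-12;
  double max_rel = 0.0;
  for (int k = 0; k < 4096; k++)
  {
    const double x = (k+0.5)*(4.0/4096.0);    /* midpoints: never an integer */
    const double xx = x*x;
    const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double approx = (xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p;
    const double exact = sin(pi*x)/(pi*x);
    const double rel = fabs(approx/exact-1.0);
    if (rel > max_rel) max_rel = rel;
  }
  printf("measured max relative error = %.3e\n", max_rel);
  return 0;
}
#endif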
static MagickRealType Triangle(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function
for Sinc().
*/
magick_unreferenced(resize_filter);
if (x < 1.0)
return(1.0-x);
return(0.0);
}
static MagickRealType Welsh(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Welsh parabolic windowing filter.
*/
magick_unreferenced(resize_filter);
if (x < 1.0)
return(1.0-x*x);
return(0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filter's support window
% (unless overridden). 'Gaussian', while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5, or approximately 3*sigma
% as recommended by many references).
%
% The special 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting the 'Sinc' or 'Jinc' function as a filter will force the
% use of that function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op'
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux; some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly, Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filter's support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisible. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This is not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these is given, it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where the Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterTypes filter_type,const MagickRealType blur,
% const MagickBooleanType cylindrical,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o cylindrical: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterTypes filter,const MagickRealType blur,
const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
const char
*artifact;
FilterTypes
filter_type,
window_type;
MagickRealType
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table Mapping given Filter, into Weighting and Windowing functions.
A 'Box' windowing function means it is a simple non-windowed filter.
A 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
"cylindrical" filter is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterTypes
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterTypes
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HanningFilter }, /* Hanning -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelshFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
};
/*
Table mapping the filter/window from the above table to an actual function.
It gives the default support size for that filter as a weighting function, and
the range to scale it with when that function is used as a Sinc windowing
function (typically 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
MagickRealType
(*function)(const MagickRealType,const ResizeFilter*);
double
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
ResizeWeightingFunctionType weightingFunctionType;
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
{ Hanning, 1.0, 1.0, 0.0, 0.0, HanningWeightingFunction }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
{ Welsh, 1.0, 1.0, 0.0, 0.0, WelshWeightingFunction }, /* Welsh (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
{ Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
(void) exception;
if (resize_filter == (ResizeFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur = blur; /* function argument blur factor (1.0) */
/* Promote 1D windowed Sinc filters to 2D windowed Jinc filters */
if (cylindrical != MagickFalse && filter_type == SincFastFilter
&& filter != SincFastFilter )
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterTypes) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterTypes) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type=cylindrical != MagickFalse ?
JincFilter : SincFastFilter;
window_type=(FilterTypes) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
resize_filter->window=filters[window_type].function;
resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(MagickRealType) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remains unchanged */
break;
default:
break;
}
/* Global sharpening (regardless of orthogonal/cylindrical usage) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= (MagickRealType) 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= (MagickRealType) 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* gaussian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficients for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= value/0.5; /* increase support */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=(MagickRealType) (StringToDouble(artifact,(char **) NULL)*MagickPI);
/* Define coefficients for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(MagickRealType) lobes;
}
/* Convert a Jinc function lobes value to a real support value */
if (resize_filter->filter == Jinc)
{
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long)resize_filter->support)-1];
/* blur this filter so its support is an integer value (lobes dependent) */
if (filter_type == LanczosRadiusFilter)
{
resize_filter->blur *= floor(resize_filter->support)/
resize_filter->support;
}
}
/* Expert Blur Override */
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(MagickRealType) MagickEpsilon;
/* Expert override of the support setting */
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale the windowing function separately from the support 'clipping'
window that the calling operator is actually planning to use.  (Expert
override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust the window function scaling to match the windowing support of the
weighting function.  This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
  Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
/* Convert B,C values into Cubic Coefficients.  See CubicBC(). */
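/*
  For reference, these coefficients are the standard Mitchell-Netravali
  cubic with the common factor of 1/6 folded in:
    |x| < 1:      ((12-9B-6C)|x|^3 + (-18+12B+6C)|x|^2 + (6-2B))/6
    1 <= |x| < 2: ((-B-6C)|x|^3 + (6B+30C)|x|^2 + (-12B-48C)|x| + (8B+24C))/6
*/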
{
const double twoB = B+B;
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
/*
Expert Option Request for verbose details of the resulting filter.
*/
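/*
  The "filter:verbose" artifact is typically set from the command line with
  something like "-define filter:verbose=1", or programmatically with
  SetImageArtifact(image,"filter:verbose","1").
*/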
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
artifact=GetImageArtifact(image,"filter:verbose");
if (IsMagickTrue(artifact) != MagickFalse)
{
double
support,
x;
/*
Set the weighting function properly when the weighting
function may not exactly match the filter of the same name.
EG: a Point filter really uses a Box weighting function, but with
a different support than the Box filter typically uses.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(), (double)resize_filter->blur);
if ( filter_type == GaussianFilter || window_type == GaussianFilter )
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(), (double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),
(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing
filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,GetMagickPrecision(),
(double) GetResizeFilterWeight(resize_filter,x));
/* A final value so gnuplot can graph the 'stop' properly. */
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resizes the image with pixel resampling.
%
% This is a shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing a full filtered and
% slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
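% A minimal usage sketch (the "image" and "exception" variables and the
% 640x480 geometry are illustrative only):
%
%   Image
%     *resize_image;
%
%   resize_image=AdaptiveResizeImage(image,640,480,exception);
%   if (resize_image == (Image *) NULL)
%     return((Image *) NULL);
%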
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order one. This is used to create the Jinc() filter function.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*J1(x), where J1() is a rational approximation of j1(x)/x;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follows:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% MagickRealType BesselOrderOne(MagickRealType x)
%
% A description of each parameter follows:
%
% o x: MagickRealType value.
%
*/
#undef I0
static MagickRealType I0(MagickRealType x)
{
MagickRealType
sum,
t,
y;
register ssize_t
i;
/*
Zeroth order modified Bessel function of the first kind (used to
normalize the Kaiser window).
*/
sum=1.0;
y=x*x/4.0;
t=y;
for (i=2; t > MagickEpsilon; i++)
{
sum+=t;
t*=y/((MagickRealType) i*i);
}
return(sum);
}
#undef J1
static MagickRealType J1(MagickRealType x)
{
MagickRealType
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.581199354001606143928050809e+21,
-0.6672106568924916298020941484e+20,
0.2316433580634002297931815435e+19,
-0.3588817569910106050743641413e+17,
0.2908795263834775409737601689e+15,
-0.1322983480332126453125473247e+13,
0.3413234182301700539091292655e+10,
-0.4695753530642995859767162166e+7,
0.270112271089232341485679099e+4
},
Qone[] =
{
0.11623987080032122878585294e+22,
0.1185770712190320999837113348e+20,
0.6092061398917521746105196863e+17,
0.2081661221307607351240184229e+15,
0.5243710262167649715406728642e+12,
0.1013863514358673989967045588e+10,
0.1501793594998585505921097578e+7,
0.1606931573481487801970916749e+4,
0.1e+1
};
p=Pone[8];
q=Qone[8];
for (i=7; i >= 0; i--)
{
p=p*x*x+Pone[i];
q=q*x*x+Qone[i];
}
return(p/q);
}
#undef P1
static MagickRealType P1(MagickRealType x)
{
MagickRealType
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.352246649133679798341724373e+5,
0.62758845247161281269005675e+5,
0.313539631109159574238669888e+5,
0.49854832060594338434500455e+4,
0.2111529182853962382105718e+3,
0.12571716929145341558495e+1
},
Qone[] =
{
0.352246649133679798068390431e+5,
0.626943469593560511888833731e+5,
0.312404063819041039923015703e+5,
0.4930396490181088979386097e+4,
0.2030775189134759322293574e+3,
0.1e+1
};
p=Pone[5];
q=Qone[5];
for (i=4; i >= 0; i--)
{
p=p*(8.0/x)*(8.0/x)+Pone[i];
q=q*(8.0/x)*(8.0/x)+Qone[i];
}
return(p/q);
}
#undef Q1
static MagickRealType Q1(MagickRealType x)
{
MagickRealType
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.3511751914303552822533318e+3,
0.7210391804904475039280863e+3,
0.4259873011654442389886993e+3,
0.831898957673850827325226e+2,
0.45681716295512267064405e+1,
0.3532840052740123642735e-1
},
Qone[] =
{
0.74917374171809127714519505e+4,
0.154141773392650970499848051e+5,
0.91522317015169922705904727e+4,
0.18111867005523513506724158e+4,
0.1038187585462133728776636e+3,
0.1e+1
};
p=Pone[5];
q=Qone[5];
for (i=4; i >= 0; i--)
{
p=p*(8.0/x)*(8.0/x)+Pone[i];
q=q*(8.0/x)*(8.0/x)+Qone[i];
}
return(p/q);
}
static MagickRealType BesselOrderOne(MagickRealType x)
{
MagickRealType
p,
q;
if (x == 0.0)
return(0.0);
p=x;
if (x < 0.0)
x=(-x);
if (x < 8.0)
return(p*J1(x));
q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
cos((double) x))));
if (p < 0.0)
q=(-q);
return(q);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroys the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
resize_filter->signature=(~MagickSignature);
resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() returns the current support window size for this
% filter. Note that this may have been enlarged by the filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickExport MagickRealType *GetResizeFilterCoefficient(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return((MagickRealType *) resize_filter->coefficient);
}
MagickExport MagickRealType GetResizeFilterBlur(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->blur);
}
MagickExport MagickRealType GetResizeFilterScale(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->scale);
}
MagickExport MagickRealType GetResizeFilterWindowSupport(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->window_support);
}
MagickExport ResizeWeightingFunctionType GetResizeFilterWeightingType(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->filterWeightingType);
}
MagickExport ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->windowWeightingType);
}
MagickExport MagickRealType GetResizeFilterSupport(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight() evaluates the specified resize filter at the point
% x, which usually lies between zero and the filter's current 'support', and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const MagickRealType x)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
% o x: the point.
%
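% A minimal sketch of evaluating a filter kernel directly (the "image" and
% "exception" variables are assumed to exist; the Lanczos choice and the
% sample point 0.5 are illustrative only):
%
%   MagickRealType
%     weight;
%
%   ResizeFilter
%     *resize_filter;
%
%   resize_filter=AcquireResizeFilter(image,LanczosFilter,1.0,MagickFalse,
%     exception);
%   weight=GetResizeFilterWeight(resize_filter,0.5);
%   resize_filter=DestroyResizeFilter(resize_filter);
%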
*/
MagickExport MagickRealType GetResizeFilterWeight(
const ResizeFilter *resize_filter,const MagickRealType x)
{
MagickRealType
scale,
weight,
x_blur;
/*
Windowing function - scale the weighting filter by this amount.
*/
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */
if ((resize_filter->window_support < MagickEpsilon) ||
(resize_filter->window == Box))
scale=1.0; /* Point or Box Filter -- avoid division by zero */
else
{
scale=resize_filter->scale;
scale=resize_filter->window(x_blur*scale,resize_filter);
}
weight=scale*resize_filter->filter(x_blur,resize_filter);
return(weight);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const InterpolatePixelMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
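% A minimal usage sketch (the variables and the 800x600 geometry are
% illustrative only); this is essentially what AdaptiveResizeImage() does:
%
%   Image
%     *resize_image;
%
%   resize_image=InterpolativeResizeImage(image,800,600,MeshInterpolatePixel,
%     exception);
%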
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
const size_t columns,const size_t rows,const InterpolatePixelMethod method,
ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
Image
*resize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
scale;
ssize_t
y;
/*
Interpolatively resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
{
InheritException(exception,&resize_image->exception);
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
scale.x=(double) image->columns/resize_image->columns;
scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
MagickPixelPacket
pixel;
PointInfo
offset;
register IndexPacket
*magick_restrict resize_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
continue;
resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
GetMagickPixelPacket(image,&pixel);
offset.y=((MagickRealType) y+0.5)*scale.y-0.5;
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
offset.x=((MagickRealType) x+0.5)*scale.x-0.5;
(void) InterpolateMagickPixelPacket(image,image_view,method,offset.x,
offset.y,&pixel,exception);
SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
continue;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
resize_image=DestroyImage(resize_image);
return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales the image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,
% const size_t columns,const size_t rows,
% const double delta_x,const double rigidity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
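% A minimal usage sketch (the variables and the 640x480 geometry are
% illustrative only; a delta_x of 1.0 permits diagonal seams and a rigidity
% of 0.0 is the typical default):
%
%   Image
%     *rescale_image;
%
%   rescale_image=LiquidRescaleImage(image,640,480,1.0,0.0,exception);
%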
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
const size_t rows,const double delta_x,const double rigidity,
ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"
CacheView
*rescale_view;
const char
*map;
guchar
*packet;
Image
*rescale_image;
int
x,
y;
LqrCarver
*carver;
LqrRetVal
lqr_status;
MagickBooleanType
status;
MagickPixelPacket
pixel;
MemoryInfo
*pixel_info;
unsigned char
*pixels;
/*
Liquid rescale image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
if ((columns <= 2) || (rows <= 2))
return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
map="RGB";
if (image->matte != MagickFalse)
map="RGBA";
if (image->colorspace == CMYKColorspace)
{
map="CMYK";
if (image->matte != MagickFalse)
map="CMYKA";
}
pixel_info=AcquireVirtualMemory(image->columns,image->rows*strlen(map)*
sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
return((Image *) NULL);
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
pixels,exception);
if (status == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
carver=lqr_carver_new(pixels,(int) image->columns,(int) image->rows,
(int) strlen(map));
if (carver == (LqrCarver *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
lqr_carver_set_preserve_input_image(carver);
lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
(void) lqr_status;
rescale_image=CloneImage(image,lqr_carver_get_width(carver),
lqr_carver_get_height(carver),MagickTrue,exception);
if (rescale_image == (Image *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
return((Image *) NULL);
}
if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rescale_image->exception);
rescale_image=DestroyImage(rescale_image);
return((Image *) NULL);
}
GetMagickPixelPacket(rescale_image,&pixel);
(void) lqr_carver_scan_reset(carver);
rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
{
register IndexPacket
*magick_restrict rescale_indexes;
register PixelPacket
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view);
pixel.red=QuantumRange*(packet[0]/255.0);
pixel.green=QuantumRange*(packet[1]/255.0);
pixel.blue=QuantumRange*(packet[2]/255.0);
if (image->colorspace != CMYKColorspace)
{
if (image->matte != MagickFalse)
pixel.opacity=QuantumRange-QuantumRange*(packet[3]/255.0);
}
else
{
pixel.index=QuantumRange*(packet[3]/255.0);
if (image->matte != MagickFalse)
pixel.opacity=QuantumRange-QuantumRange*(packet[4]/255.0);
}
SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
break;
}
rescale_view=DestroyCacheView(rescale_view);
/*
Relinquish resources.
*/
pixel_info=RelinquishVirtualMemory(pixel_info);
lqr_carver_destroy(carver);
return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
const size_t magick_unused(columns),const size_t magick_unused(rows),
const double magick_unused(delta_x),const double magick_unused(rigidity),
ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
(void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
"DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"
CacheView
*image_view,
*magnify_view;
Image
*magnify_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize magnified image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
exception);
if (magnify_image == (Image *) NULL)
return((Image *) NULL);
/*
Magnify image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,magnify_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict magnify_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
magnify_indexes=GetCacheViewAuthenticIndexQueue(magnify_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity[9];
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register PixelPacket
*magick_restrict r;
register ssize_t
i;
/*
Magnify this row of pixels.
*/
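/*
  This is a 2x pixel-art magnification rule (in the spirit of the
  EPX/Scale2x family): if the vertical or horizontal neighbour pair has
  matching intensity the center pixel is replicated into all four output
  pixels; otherwise each output pixel takes the value of a side neighbour
  when the two neighbours adjacent to its corner match, and the center
  pixel otherwise.
*/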
p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (i=0; i < 9; i++)
intensity[i]=GetPixelIntensity(image,p+i);
r=q;
if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
(fabs(intensity[3]-intensity[5]) < MagickEpsilon))
{
/*
Clone center pixel.
*/
*r=p[4];
r++;
*r=p[4];
r+=(magnify_image->columns-1);
*r=p[4];
r++;
*r=p[4];
}
else
{
/*
Selectively clone pixel.
*/
if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
*r=p[3];
else
*r=p[4];
r++;
if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
*r=p[5];
else
*r=p[4];
r+=(magnify_image->columns-1);
if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
*r=p[3];
else
*r=p[4];
r++;
if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
*r=p[5];
else
*r=p[4];
}
if (indexes != (const IndexPacket *) NULL)
{
register IndexPacket
*r;
/*
Magnify the colormap indexes.
*/
r=magnify_indexes;
if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
(fabs(intensity[3]-intensity[5]) < MagickEpsilon))
{
/*
Clone center pixel.
*/
*r=indexes[4];
r++;
*r=indexes[4];
r+=(magnify_image->columns-1);
*r=indexes[4];
r++;
*r=indexes[4];
}
else
{
/*
Selectively clone pixel.
*/
if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
*r=indexes[3];
else
*r=indexes[4];
r++;
if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
*r=indexes[5];
else
*r=indexes[4];
r+=(magnify_image->columns-1);
if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
*r=indexes[3];
else
*r=indexes[4];
r++;
if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
*r=indexes[5];
else
*r=indexes[4];
}
magnify_indexes+=2;
}
q+=2;
}
if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MagnifyImage)
#endif
proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
magnify_view=DestroyCacheView(magnify_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
magnify_image=DestroyImage(magnify_image);
return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
Image
*minify_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
1.0,exception);
return(minify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes the image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
% o exception: return any errors or warnings in this structure.
%
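% A minimal usage sketch (the variables and the 300 DPI target are
% illustrative only):
%
%   Image
%     *resample_image;
%
%   resample_image=ResampleImage(image,300.0,300.0,LanczosFilter,1.0,
%     exception);
%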
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
const double y_resolution,const FilterTypes filter,const double blur,
ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"
Image
*resample_image;
size_t
height,
width;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=(size_t) (x_resolution*image->columns/(image->x_resolution == 0.0 ?
72.0 : image->x_resolution)+0.5);
height=(size_t) (y_resolution*image->rows/(image->y_resolution == 0.0 ?
72.0 : image->y_resolution)+0.5);
resample_image=ResizeImage(image,width,height,filter,blur,exception);
if (resample_image != (Image *) NULL)
{
resample_image->x_resolution=x_resolution;
resample_image->y_resolution=y_resolution;
}
return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireResizeFilter()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,
% const size_t rows,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp. Typically set
% this to 1.0.
%
% o exception: return any errors or warnings in this structure.
%
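% A minimal usage sketch (the variables, the 1024x768 geometry, and the
% Lanczos choice are illustrative only):
%
%   Image
%     *resize_image;
%
%   resize_image=ResizeImage(image,1024,768,LanczosFilter,1.0,exception);
%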
*/
typedef struct _ContributionInfo
{
MagickRealType
weight;
ssize_t
pixel;
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
ContributionInfo **contribution)
{
register ssize_t
i;
assert(contribution != (ContributionInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (contribution[i] != (ContributionInfo *) NULL)
contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
contribution[i]);
contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
return(contribution);
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
register ssize_t
i;
ContributionInfo
**contribution;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
sizeof(*contribution));
if (contribution == (ContributionInfo **) NULL)
return((ContributionInfo **) NULL);
(void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
for (i=0; i < (ssize_t) number_threads; i++)
{
contribution[i]=(ContributionInfo *) MagickAssumeAligned(
AcquireAlignedMemory(count,sizeof(**contribution)));
if (contribution[i] == (ContributionInfo *) NULL)
return(DestroyContributionThreadSet(contribution));
}
return(contribution);
}
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const MagickRealType x_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**magick_restrict contributions;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
scale,
support;
ssize_t
x;
/*
Apply filter to resize horizontally from image to resize image.
*/
scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
{
InheritException(exception,&resize_image->exception);
return(MagickFalse);
}
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point
sampling.
*/
support=(MagickRealType) 0.5;
scale=1.0;
}
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
(void) ResetMagickMemory(&zero,0,sizeof(zero));
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,resize_image,resize_image->columns,1)
#endif
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
const int
id = GetOpenMPThreadId();
MagickRealType
bisect,
density;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ContributionInfo
*magick_restrict contribution;
register IndexPacket
*magick_restrict resize_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
y;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
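/*
  For this destination column, locate the center of the sampling window in
  source coordinates (bisect) and build the contribution list: every source
  column within the filter support is weighted by the resize filter
  evaluated at its scaled distance from that center, and the weights are
  normalized when their sum is not already 1.
*/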
bisect=(MagickRealType) (x+0.5)/x_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((MagickRealType) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if (n == 0)
continue;
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
(contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
MagickPixelPacket
pixel;
MagickRealType
alpha;
register ssize_t
i;
ssize_t
j;
pixel=zero;
if (image->matte == MagickFalse)
{
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight;
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=alpha*GetPixelOpacity(p+j);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight;
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+y,ClampToQuantum(pixel.index));
}
}
else
{
double
gamma;
gamma=0.0;
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*pixel.index));
}
}
if ((resize_image->storage_class == PseudoClass) &&
(image->storage_class == PseudoClass))
{
i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
1.0)+0.5);
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i-start].pixel-contribution[0].pixel);
SetPixelIndex(resize_indexes+y,GetPixelIndex(indexes+j));
}
q++;
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_HorizontalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const MagickRealType y_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**magick_restrict contributions;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
scale,
support;
ssize_t
y;
/*
Apply filter to resize vertically from image to resize image.
*/
scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
{
InheritException(exception,&resize_image->exception);
return(MagickFalse);
}
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point
sampling.
*/
support=(MagickRealType) 0.5;
scale=1.0;
}
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
(void) ResetMagickMemory(&zero,0,sizeof(zero));
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickRealType
bisect,
density;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ContributionInfo
*magick_restrict contribution;
register IndexPacket
*magick_restrict resize_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
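/*
  For this destination row, locate the center of the sampling window in
  source coordinates (bisect) and build the contribution list: every source
  row within the filter support is weighted by the resize filter evaluated
  at its scaled distance from that center, and the weights are normalized
  when their sum is not already 1.
*/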
bisect=(MagickRealType) (y+0.5)/y_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((MagickRealType) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if (n == 0)
continue;
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
exception);
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
MagickPixelPacket
pixel;
MagickRealType
alpha;
register ssize_t
i;
ssize_t
j;
pixel=zero;
if (image->matte == MagickFalse)
{
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight;
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=alpha*GetPixelOpacity(p+j);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight;
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+x,ClampToQuantum(pixel.index));
}
}
else
{
double
gamma;
gamma=0.0;
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
if ((resize_image->storage_class == PseudoClass) &&
(image->storage_class == PseudoClass))
{
i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
1.0)+0.5);
j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)*
image->columns+x);
SetPixelIndex(resize_indexes+x,GetPixelIndex(indexes+j));
}
q++;
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_VerticalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
const size_t rows,const FilterTypes filter,const double blur,
ExceptionInfo *exception)
{
FilterTypes
filter_type;
Image
*filter_image,
*resize_image;
MagickOffsetType
offset;
MagickRealType
x_factor,
y_factor;
MagickSizeType
span;
MagickStatusType
status;
ResizeFilter
*resize_filter;
/*
Acquire resize image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows) &&
(filter == UndefinedFilter) && (blur == 1.0))
return(CloneImage(image,0,0,MagickTrue,exception));
/*
Acquire resize filter.
*/
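  /*
    Filter selection below: honor the caller's filter if one is given;
    otherwise fall back to Point when no resampling is required, Mitchell
    for palette or matte images and for enlargements, and Lanczos in all
    other cases.
  */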
x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
filter_type=LanczosFilter;
if (filter != UndefinedFilter)
filter_type=filter;
else
if ((x_factor == 1.0) && (y_factor == 1.0))
filter_type=PointFilter;
else
if ((image->storage_class == PseudoClass) ||
(image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0))
filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse,
    exception);
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(resize_image);
}
if (x_factor > y_factor)
filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
else
filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
if (filter_image == (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(DestroyImage(resize_image));
}
/*
Resize image.
*/
offset=0;
if (x_factor > y_factor)
{
span=(MagickSizeType) (filter_image->columns+rows);
status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
&offset,exception);
status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
span,&offset,exception);
}
else
{
span=(MagickSizeType) (filter_image->rows+columns);
status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
&offset,exception);
status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
span,&offset,exception);
}
/*
Free resources.
*/
filter_image=DestroyImage(filter_image);
resize_filter=DestroyResizeFilter(resize_filter);
if (status == MagickFalse)
{
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
resize_image->type=image->type;
return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
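/*
  Illustrative usage sketch (not part of this source): down-sample an image
  to 320x240 by point sampling.  The "image" variable and the 320x240
  geometry are assumptions for the example; the optional sample:offset
  artifact selects where within each sample region the pixel is taken.

    ExceptionInfo
      *exception;

    Image
      *sampled;

    exception=AcquireExceptionInfo();
    (void) SetImageArtifact(image,"sample:offset","50");
    sampled=SampleImage(image,320,240,exception);
    if (sampled != (Image *) NULL)
      sampled=DestroyImage(sampled);
    exception=DestroyExceptionInfo(exception);
*/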
MagickExport Image *SampleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"
CacheView
*image_view,
*sample_view;
Image
*sample_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
x;
ssize_t
*x_offset,
y;
PointInfo
sample_offset;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
/*
    Check for a possible user-defined sampling offset artifact.
    The default sampling offset is the mid-point of the sample regions.
*/
sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
{
const char
*value;
value=GetImageArtifact(image,"sample:offset");
if (value != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
        flags=ParseGeometry(value,&geometry_info);
sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
if ((flags & SigmaValue) != 0)
sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
}
}
/*
Allocate scan line buffer and column offset buffers.
*/
x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
sizeof(*x_offset));
if (x_offset == (ssize_t *) NULL)
{
sample_image=DestroyImage(sample_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (x=0; x < (ssize_t) sample_image->columns; x++)
x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/
sample_image->columns);
/*
Sample each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,sample_image,1,1)
#endif
for (y=0; y < (ssize_t) sample_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict sample_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
y_offset;
if (status == MagickFalse)
continue;
y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
sample_image->rows);
p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view);
/*
Sample each column.
*/
for (x=0; x < (ssize_t) sample_image->columns; x++)
*q++=p[x_offset[x]];
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
for (x=0; x < (ssize_t) sample_image->columns; x++)
SetPixelIndex(sample_indexes+x,GetPixelIndex(indexes+x_offset[x]));
if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SampleImage)
#endif
proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
sample_view=DestroyCacheView(sample_view);
x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
sample_image->type=image->type;
if (status == MagickFalse)
sample_image=DestroyImage(sample_image);
return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
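/*
  Illustrative usage sketch (not part of this source): ScaleImage() averages
  pixel areas (an area/box resampling, as the span/scale accumulation below
  shows), so it is fast but softer than ResizeImage().  The "image" variable,
  the "exception" pointer, and the 100x100 geometry are assumptions for the
  example.

    Image
      *scaled;

    scaled=ScaleImage(image,100,100,exception);
    if (scaled != (Image *) NULL)
      scaled=DestroyImage(scaled);
*/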
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
CacheView
*image_view,
*scale_view;
Image
*scale_image;
MagickBooleanType
next_column,
next_row,
proceed,
status;
MagickPixelPacket
pixel,
*scale_scanline,
*scanline,
*x_vector,
*y_vector,
zero;
MagickRealType
alpha;
PointInfo
scale,
span;
register ssize_t
i;
ssize_t
number_rows,
y;
/*
Initialize scaled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (scale_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
{
InheritException(exception,&scale_image->exception);
scale_image=DestroyImage(scale_image);
return((Image *) NULL);
}
/*
Allocate memory.
*/
x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
sizeof(*x_vector));
scanline=x_vector;
if (image->rows != scale_image->rows)
scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
sizeof(*scanline));
scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
scale_image->columns,sizeof(*scale_scanline));
y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
sizeof(*y_vector));
if ((scanline == (MagickPixelPacket *) NULL) ||
(scale_scanline == (MagickPixelPacket *) NULL) ||
(x_vector == (MagickPixelPacket *) NULL) ||
(y_vector == (MagickPixelPacket *) NULL))
{
scale_image=DestroyImage(scale_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Scale image.
*/
number_rows=0;
next_row=MagickTrue;
span.y=1.0;
scale.y=(double) scale_image->rows/(double) image->rows;
(void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
sizeof(*y_vector));
GetMagickPixelPacket(image,&pixel);
(void) ResetMagickMemory(&zero,0,sizeof(zero));
i=0;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
scale_view=AcquireAuthenticCacheView(scale_image,exception);
for (y=0; y < (ssize_t) scale_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict scale_indexes;
register MagickPixelPacket
*magick_restrict s,
*magick_restrict t;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
break;
q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
alpha=1.0;
scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
if (scale_image->rows == image->rows)
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->matte != MagickFalse)
alpha=QuantumScale*GetPixelAlpha(p);
x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
if (image->matte != MagickFalse)
x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
if (indexes != (IndexPacket *) NULL)
x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x));
p++;
}
}
else
{
/*
Scale Y direction.
*/
while (scale.y < span.y)
{
if ((next_row != MagickFalse) &&
(number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->matte != MagickFalse)
alpha=QuantumScale*GetPixelAlpha(p);
x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
if (image->matte != MagickFalse)
x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
if (indexes != (IndexPacket *) NULL)
x_vector[x].index=(MagickRealType) (alpha*
GetPixelIndex(indexes+x));
p++;
}
number_rows++;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
y_vector[x].red+=scale.y*x_vector[x].red;
y_vector[x].green+=scale.y*x_vector[x].green;
y_vector[x].blue+=scale.y*x_vector[x].blue;
if (scale_image->matte != MagickFalse)
y_vector[x].opacity+=scale.y*x_vector[x].opacity;
if (scale_indexes != (IndexPacket *) NULL)
y_vector[x].index+=scale.y*x_vector[x].index;
}
span.y-=scale.y;
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->matte != MagickFalse)
alpha=QuantumScale*GetPixelAlpha(p);
x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
if (image->matte != MagickFalse)
x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
if (indexes != (IndexPacket *) NULL)
x_vector[x].index=(MagickRealType) (alpha*
GetPixelIndex(indexes+x));
p++;
}
number_rows++;
next_row=MagickFalse;
}
s=scanline;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel.red=y_vector[x].red+span.y*x_vector[x].red;
pixel.green=y_vector[x].green+span.y*x_vector[x].green;
pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
if (image->matte != MagickFalse)
pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
if (scale_indexes != (IndexPacket *) NULL)
pixel.index=y_vector[x].index+span.y*x_vector[x].index;
s->red=pixel.red;
s->green=pixel.green;
s->blue=pixel.blue;
if (scale_image->matte != MagickFalse)
s->opacity=pixel.opacity;
if (scale_indexes != (IndexPacket *) NULL)
s->index=pixel.index;
s++;
y_vector[x]=zero;
}
scale.y-=span.y;
if (scale.y <= 0)
{
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
span.y=1.0;
}
if (scale_image->columns == image->columns)
{
/*
Transfer scanline to scaled image.
*/
s=scanline;
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (scale_image->matte != MagickFalse)
alpha=QuantumScale*GetPixelAlpha(s);
alpha=PerceptibleReciprocal(alpha);
SetPixelRed(q,ClampToQuantum(alpha*s->red));
SetPixelGreen(q,ClampToQuantum(alpha*s->green));
SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
if (scale_image->matte != MagickFalse)
SetPixelOpacity(q,ClampToQuantum(s->opacity));
if (scale_indexes != (IndexPacket *) NULL)
SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
q++;
s++;
}
}
else
{
/*
Scale X direction.
*/
pixel=zero;
next_column=MagickFalse;
span.x=1.0;
s=scanline;
t=scale_scanline;
for (x=0; x < (ssize_t) image->columns; x++)
{
scale.x=(double) scale_image->columns/(double) image->columns;
while (scale.x >= span.x)
{
if (next_column != MagickFalse)
{
pixel=zero;
t++;
}
pixel.red+=span.x*s->red;
pixel.green+=span.x*s->green;
pixel.blue+=span.x*s->blue;
if (image->matte != MagickFalse)
pixel.opacity+=span.x*s->opacity;
if (scale_indexes != (IndexPacket *) NULL)
pixel.index+=span.x*s->index;
t->red=pixel.red;
t->green=pixel.green;
t->blue=pixel.blue;
if (scale_image->matte != MagickFalse)
t->opacity=pixel.opacity;
if (scale_indexes != (IndexPacket *) NULL)
t->index=pixel.index;
scale.x-=span.x;
span.x=1.0;
next_column=MagickTrue;
}
if (scale.x > 0)
{
if (next_column != MagickFalse)
{
pixel=zero;
next_column=MagickFalse;
t++;
}
pixel.red+=scale.x*s->red;
pixel.green+=scale.x*s->green;
pixel.blue+=scale.x*s->blue;
if (scale_image->matte != MagickFalse)
pixel.opacity+=scale.x*s->opacity;
if (scale_indexes != (IndexPacket *) NULL)
pixel.index+=scale.x*s->index;
span.x-=scale.x;
}
s++;
}
if (span.x > 0)
{
s--;
pixel.red+=span.x*s->red;
pixel.green+=span.x*s->green;
pixel.blue+=span.x*s->blue;
if (scale_image->matte != MagickFalse)
pixel.opacity+=span.x*s->opacity;
if (scale_indexes != (IndexPacket *) NULL)
pixel.index+=span.x*s->index;
}
if ((next_column == MagickFalse) &&
((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns))
{
t->red=pixel.red;
t->green=pixel.green;
t->blue=pixel.blue;
if (scale_image->matte != MagickFalse)
t->opacity=pixel.opacity;
if (scale_indexes != (IndexPacket *) NULL)
t->index=pixel.index;
}
/*
Transfer scanline to scaled image.
*/
t=scale_scanline;
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (scale_image->matte != MagickFalse)
alpha=QuantumScale*GetPixelAlpha(t);
alpha=PerceptibleReciprocal(alpha);
SetPixelRed(q,ClampToQuantum(alpha*t->red));
SetPixelGreen(q,ClampToQuantum(alpha*t->green));
SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
if (scale_image->matte != MagickFalse)
SetPixelOpacity(q,ClampToQuantum(t->opacity));
if (scale_indexes != (IndexPacket *) NULL)
SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
t++;
q++;
}
}
if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
{
status=MagickFalse;
break;
}
proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
scale_view=DestroyCacheView(scale_view);
image_view=DestroyCacheView(image_view);
/*
Free allocated memory.
*/
y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
if (scale_image->rows != image->rows)
scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
scale_image->type=image->type;
if (status == MagickFalse)
scale_image=DestroyImage(scale_image);
return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
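/*
  Illustrative usage sketch (not part of this source): build a 128x128
  thumbnail and write it out.  The "image", "image_info", "exception", and
  "thumb.png" names are assumptions for the example.

    Image
      *thumbnail;

    thumbnail=ThumbnailImage(image,128,128,exception);
    if (thumbnail != (Image *) NULL)
      {
        (void) CopyMagickString(thumbnail->filename,"thumb.png",MaxTextExtent);
        (void) WriteImage(image_info,thumbnail);
        thumbnail=DestroyImage(thumbnail);
      }
*/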
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5
char
value[MaxTextExtent];
const char
*name;
Image
*thumbnail_image;
MagickRealType
x_factor,
y_factor;
size_t
version;
struct stat
attributes;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
if ((x_factor*y_factor) > 0.1)
thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
exception);
else
if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
thumbnail_image=ResizeImage(image,columns,rows,image->filter,
image->blur,exception);
else
{
Image
*sample_image;
sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
image->blur,exception);
sample_image=DestroyImage(sample_image);
}
if (thumbnail_image == (Image *) NULL)
return(thumbnail_image);
(void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
if (thumbnail_image->matte == MagickFalse)
(void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
thumbnail_image->depth=8;
thumbnail_image->interlace=NoInterlace;
/*
Strip all profiles except color profiles.
*/
ResetImageProfileIterator(thumbnail_image);
for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
{
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
{
(void) DeleteImageProfile(thumbnail_image,name);
ResetImageProfileIterator(thumbnail_image);
}
name=GetNextImageProfile(thumbnail_image);
}
(void) DeleteImageProperty(thumbnail_image,"comment");
(void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
if (strstr(image->magick_filename,"//") == (char *) NULL)
(void) FormatLocaleString(value,MaxTextExtent,"file://%s",
image->magick_filename);
(void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
(void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
{
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
attributes.st_mtime);
(void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
}
(void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
(void) ConcatenateMagickString(value,"B",MaxTextExtent);
(void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
(void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
LocaleLower(value);
(void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
(void) SetImageProperty(thumbnail_image,"software",
GetMagickVersion(&version));
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
image->magick_columns);
(void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
image->magick_rows);
(void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
GetImageListLength(image));
(void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
return(thumbnail_image);
}
|
rose_livenessTest.c
|
#include "omp.h"
typedef double real8;
void foo(real8 *y,real8 *d__,real8 *d11,real8 *d12,real8 *d13,real8 *d22,real8 *d23,real8 *d33,real8 *m,int *nell,real8 *p,int t,int flagB,int flagA,int ub)
{
int l;
int nel;
int t1 = t - 1;
if (flagB == 0) {
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
int l36 = l * 36;
real8 h12 = m[(l8 + 0) * 4 + 1];
real8 h13 = m[(l8 + 0) * 4 + 2];
real8 h14 = m[(l8 + 0) * 4 + 3];
real8 h22 = m[(l8 + 1) * 4 + 1];
real8 h23 = m[(l8 + 1) * 4 + 2];
real8 h24 = m[(l8 + 1) * 4 + 3];
real8 h32 = m[(l8 + 2) * 4 + 1];
real8 h33 = m[(l8 + 2) * 4 + 2];
real8 h34 = m[(l8 + 2) * 4 + 3];
real8 h42 = m[(l8 + 3) * 4 + 1];
real8 h43 = m[(l8 + 3) * 4 + 2];
real8 h44 = m[(l8 + 3) * 4 + 3];
real8 h52 = m[(l8 + 4) * 4 + 1];
real8 h53 = m[(l8 + 4) * 4 + 2];
real8 h54 = m[(l8 + 4) * 4 + 3];
real8 h62 = m[(l8 + 5) * 4 + 1];
real8 h63 = m[(l8 + 5) * 4 + 2];
real8 h64 = m[(l8 + 5) * 4 + 3];
real8 h72 = m[(l8 + 6) * 4 + 1];
real8 h73 = m[(l8 + 6) * 4 + 2];
real8 h74 = m[(l8 + 6) * 4 + 3];
real8 h82 = m[(l8 + 7) * 4 + 1];
real8 h83 = m[(l8 + 7) * 4 + 2];
real8 h84 = m[(l8 + 7) * 4 + 3];
real8 ddd = d__[l];
y[l36 + 0] += ddd * (h12 * h12 + h13 * h13 + h14 * h14);
y[l36 + 1] += ddd * (h12 * h22 + h13 * h23 + h14 * h24);
y[l36 + 2] += ddd * (h22 * h22 + h23 * h23 + h24 * h24);
y[l36 + 3] += ddd * (h12 * h32 + h13 * h33 + h14 * h34);
y[l36 + 4] += ddd * (h22 * h32 + h23 * h33 + h24 * h34);
y[l36 + 5] += ddd * (h32 * h32 + h33 * h33 + h34 * h34);
y[l36 + 6] += ddd * (h12 * h42 + h13 * h43 + h14 * h44);
y[l36 + 7] += ddd * (h22 * h42 + h23 * h43 + h24 * h44);
y[l36 + 8] += ddd * (h32 * h42 + h33 * h43 + h34 * h44);
y[l36 + 9] += ddd * (h42 * h42 + h43 * h43 + h44 * h44);
y[l36 + 10] += ddd * (h12 * h52 + h13 * h53 + h14 * h54);
y[l36 + 11] += ddd * (h22 * h52 + h23 * h53 + h24 * h54);
y[l36 + 12] += ddd * (h32 * h52 + h33 * h53 + h34 * h54);
y[l36 + 13] += ddd * (h42 * h52 + h43 * h53 + h44 * h54);
y[l36 + 14] += ddd * (h52 * h52 + h53 * h53 + h54 * h54);
y[l36 + 15] += ddd * (h12 * h62 + h13 * h63 + h14 * h64);
y[l36 + 16] += ddd * (h22 * h62 + h23 * h63 + h24 * h64);
y[l36 + 17] += ddd * (h32 * h62 + h33 * h63 + h34 * h64);
y[l36 + 18] += ddd * (h42 * h62 + h43 * h63 + h44 * h64);
y[l36 + 19] += ddd * (h52 * h62 + h53 * h63 + h54 * h64);
y[l36 + 20] += ddd * (h62 * h62 + h63 * h63 + h64 * h64);
y[l36 + 21] += ddd * (h12 * h72 + h13 * h73 + h14 * h74);
y[l36 + 22] += ddd * (h22 * h72 + h23 * h73 + h24 * h74);
y[l36 + 23] += ddd * (h32 * h72 + h33 * h73 + h34 * h74);
y[l36 + 24] += ddd * (h42 * h72 + h43 * h73 + h44 * h74);
y[l36 + 25] += ddd * (h52 * h72 + h53 * h73 + h54 * h74);
y[l36 + 26] += ddd * (h62 * h72 + h63 * h73 + h64 * h74);
y[l36 + 27] += ddd * (h72 * h72 + h73 * h73 + h74 * h74);
y[l36 + 28] += ddd * (h12 * h82 + h13 * h83 + h14 * h84);
y[l36 + 29] += ddd * (h22 * h82 + h23 * h83 + h24 * h84);
y[l36 + 30] += ddd * (h32 * h82 + h33 * h83 + h34 * h84);
y[l36 + 31] += ddd * (h42 * h82 + h43 * h83 + h44 * h84);
y[l36 + 32] += ddd * (h52 * h82 + h53 * h83 + h54 * h84);
y[l36 + 33] += ddd * (h62 * h82 + h63 * h83 + h64 * h84);
y[l36 + 34] += ddd * (h72 * h82 + h73 * h83 + h74 * h84);
y[l36 + 35] += ddd * (h82 * h82 + h83 * h83 + h84 * h84);
}
if (flagA > 0) {
#pragma omp parallel for private (nel,l) firstprivate (ub,t1)
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
real8 h1 = m[(t1 + l8) * 4 + 1];
real8 h2 = m[(t1 + l8) * 4 + 2];
real8 h3 = m[(t1 + l8) * 4 + 3];
nel = nell[l];
p[nell[l]] += d__[l] * 64. * (h1 * h1 + h2 * h2 + h3 * h3);
}
}
}
else {
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
int l36 = l * 36;
real8 d_11 = d11[l];
real8 d_12 = d12[l];
real8 d_13 = d13[l];
real8 d_22 = d22[l];
real8 d_23 = d23[l];
real8 d_33 = d33[l];
real8 h12 = m[(l8 + 0) * 4 + 1];
real8 h13 = m[(l8 + 0) * 4 + 2];
real8 h14 = m[(l8 + 0) * 4 + 3];
real8 h22 = m[(l8 + 1) * 4 + 1];
real8 h23 = m[(l8 + 1) * 4 + 2];
real8 h24 = m[(l8 + 1) * 4 + 3];
real8 h32 = m[(l8 + 2) * 4 + 1];
real8 h33 = m[(l8 + 2) * 4 + 2];
real8 h34 = m[(l8 + 2) * 4 + 3];
real8 h42 = m[(l8 + 3) * 4 + 1];
real8 h43 = m[(l8 + 3) * 4 + 2];
real8 h44 = m[(l8 + 3) * 4 + 3];
real8 h52 = m[(l8 + 4) * 4 + 1];
real8 h53 = m[(l8 + 4) * 4 + 2];
real8 h54 = m[(l8 + 4) * 4 + 3];
real8 h62 = m[(l8 + 5) * 4 + 1];
real8 h63 = m[(l8 + 5) * 4 + 2];
real8 h64 = m[(l8 + 5) * 4 + 3];
real8 h72 = m[(l8 + 6) * 4 + 1];
real8 h73 = m[(l8 + 6) * 4 + 2];
real8 h74 = m[(l8 + 6) * 4 + 3];
real8 h82 = m[(l8 + 7) * 4 + 1];
real8 h83 = m[(l8 + 7) * 4 + 2];
real8 h84 = m[(l8 + 7) * 4 + 3];
y[l36 + 0] = y[l36 + 0] + h12 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h13 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h14 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 1] = y[l36 + 1] + h22 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h23 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h24 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 2] = y[l36 + 2] + h22 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h23 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h24 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 3] = y[l36 + 3] + h32 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h33 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h34 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 4] = y[l36 + 4] + h32 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h33 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h34 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 5] = y[l36 + 5] + h32 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h33 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h34 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 6] = y[l36 + 6] + h42 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h43 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h44 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 7] = y[l36 + 7] + h42 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h43 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h44 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 8] = y[l36 + 8] + h42 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h43 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h44 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 9] = y[l36 + 9] + h42 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h43 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h44 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 10] = y[l36 + 10] + h52 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h53 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h54 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 11] = y[l36 + 11] + h52 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h53 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h54 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 12] = y[l36 + 12] + h52 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h53 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h54 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 13] = y[l36 + 13] + h52 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h53 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h54 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 14] = y[l36 + 14] + h52 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h53 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h54 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 15] = y[l36 + 15] + h62 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h63 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h64 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 16] = y[l36 + 16] + h62 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h63 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h64 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 17] = y[l36 + 17] + h62 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h63 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h64 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 18] = y[l36 + 18] + h62 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h63 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h64 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 19] = y[l36 + 19] + h62 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h63 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h64 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 20] = y[l36 + 20] + h62 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h63 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h64 * (d_13 * h62 + d_23 * h63 + d_33 * h64);
y[l36 + 21] = y[l36 + 21] + h72 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h73 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h74 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 22] = y[l36 + 22] + h72 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h73 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h74 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 23] = y[l36 + 23] + h72 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h73 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h74 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 24] = y[l36 + 24] + h72 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h73 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h74 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 25] = y[l36 + 25] + h72 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h73 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h74 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 26] = y[l36 + 26] + h72 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h73 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h74 * (d_13 * h62 + d_23 * h63 + d_33 * h64);
y[l36 + 27] = y[l36 + 27] + h72 * (d_11 * h72 + d_12 * h73 + d_13 * h74) + h73 * (d_12 * h72 + d_22 * h73 + d_23 * h74) + h74 * (d_13 * h72 + d_23 * h73 + d_33 * h74);
y[l36 + 28] = y[l36 + 28] + h82 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h83 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h84 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 29] = y[l36 + 29] + h82 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h83 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h84 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 30] = y[l36 + 30] + h82 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h83 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h84 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 31] = y[l36 + 31] + h82 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h83 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h84 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 32] = y[l36 + 32] + h82 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h83 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h84 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 33] = y[l36 + 33] + h82 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h83 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h84 * (d_13 * h62 + d_23 * h63 + d_33 * h64);
y[l36 + 34] = y[l36 + 34] + h82 * (d_11 * h72 + d_12 * h73 + d_13 * h74) + h83 * (d_12 * h72 + d_22 * h73 + d_23 * h74) + h84 * (d_13 * h72 + d_23 * h73 + d_33 * h74);
y[l36 + 35] = y[l36 + 35] + h82 * (d_11 * h82 + d_12 * h83 + d_13 * h84) + h83 * (d_12 * h82 + d_22 * h83 + d_23 * h84) + h84 * (d_13 * h82 + d_23 * h83 + d_33 * h84);
}
if (flagA > 0) {
#pragma omp parallel for private (nel,l) firstprivate (ub,t1)
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
real8 h1 = m[(t1 + l8) * 4 + 1];
real8 h2 = m[(t1 + l8) * 4 + 2];
real8 h3 = m[(t1 + l8) * 4 + 3];
nel = nell[l];
p[nell[l]] += (h1 * (d11[l] * h1 + d12[l] * 2. * h2 + d13[l] * 2. * h3) + h2 * (d22[l] * h2 + d23[l] * 2. * h3) + h3 * d33[l] * h3) * 64.;
}
}
}
}
|
ParallelClauseLink.c
|
int main() {
#pragma omp parallel if (1) num_threads(2)
{
}
}
|
CPULauncher.h
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <cassert>
#include <vector>
#include "Open3D/Core/AdvancedIndexing.h"
#include "Open3D/Core/Indexer.h"
#include "Open3D/Core/ParallelUtil.h"
#include "Open3D/Core/Tensor.h"
#include "Open3D/Utility/Console.h"
namespace open3d {
namespace kernel {
class CPULauncher {
public:
template <typename func_t>
static void LaunchUnaryEWKernel(const Indexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
template <typename func_t>
static void LaunchBinaryEWKernel(const Indexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
indexer.GetInputPtr(1, workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
template <typename func_t>
static void LaunchAdvancedIndexerKernel(const AdvancedIndexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
template <typename scalar_t, typename func_t>
static void LaunchReductionKernelSerial(const Indexer& indexer,
func_t element_kernel) {
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
    /// Create num_threads workers to compute partial reductions and then
    /// combine them into the final result. This only applies to reduction
    /// ops with a single output.
template <typename scalar_t, typename func_t>
static void LaunchReductionKernelTwoPass(const Indexer& indexer,
func_t element_kernel,
scalar_t identity) {
if (indexer.NumOutputElements() > 1) {
utility::LogError(
"Internal error: two-pass reduction only works for "
"single-output reduction ops.");
}
int64_t num_workloads = indexer.NumWorkloads();
int64_t num_threads = parallel_util::GetMaxThreads();
int64_t workload_per_thread =
(num_workloads + num_threads - 1) / num_threads;
std::vector<scalar_t> thread_results(num_threads, identity);
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
int64_t start = thread_idx * workload_per_thread;
int64_t end = std::min(start + workload_per_thread, num_workloads);
for (int64_t workload_idx = start; workload_idx < end;
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
&thread_results[thread_idx]);
}
}
void* output_ptr = indexer.GetOutputPtr(0);
for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
element_kernel(&thread_results[thread_idx], output_ptr);
}
}
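    /*
      Generic sketch of the two-pass pattern used above, independent of
      Open3D (the data array, NUM_THREADS, and the chunk_begin()/chunk_end()
      helpers are illustrative assumptions): each thread folds its chunk into
      a private partial result, then the partials are combined serially.

        double partial[NUM_THREADS];
        #pragma omp parallel for schedule(static)
        for (int t = 0; t < NUM_THREADS; ++t) {
            partial[t] = 0.0;                   // identity for sum
            for (int64_t i = chunk_begin(t); i < chunk_end(t); ++i)
                partial[t] += data[i];          // pass 1: per-thread reduction
        }
        double result = 0.0;
        for (int t = 0; t < NUM_THREADS; ++t)
            result += partial[t];               // pass 2: combine partials
    */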
template <typename scalar_t, typename func_t>
static void LaunchReductionParallelDim(const Indexer& indexer,
func_t element_kernel) {
// Prefers outer dimension >= num_threads.
const int64_t* indexer_shape = indexer.GetMasterShape();
const int64_t num_dims = indexer.NumDims();
int64_t num_threads = parallel_util::GetMaxThreads();
// Init best_dim as the outer-most non-reduction dim.
int64_t best_dim = num_dims - 1;
while (best_dim >= 0 && indexer.IsReductionDim(best_dim)) {
best_dim--;
}
for (int64_t dim = best_dim; dim >= 0 && !indexer.IsReductionDim(dim);
--dim) {
if (indexer_shape[dim] >= num_threads) {
best_dim = dim;
break;
} else if (indexer_shape[dim] > indexer_shape[best_dim]) {
best_dim = dim;
}
}
if (best_dim == -1) {
utility::LogError(
"Internal error: all dims are reduction dims, use "
"LaunchReductionKernelTwoPass instead.");
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t i = 0; i < indexer_shape[best_dim]; ++i) {
Indexer sub_indexer(indexer);
sub_indexer.ShrinkDim(best_dim, i, 1);
LaunchReductionKernelSerial<scalar_t>(sub_indexer, element_kernel);
}
}
};
} // namespace kernel
} // namespace open3d
|
chat.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/epoll.h>
#include <fcntl.h>
#include <omp.h>
#include "myqueue.h"
#define IP "127.0.0.1"
#define PORT 3000
#define MAX_CLIENT 2
#define MAX_DATA 1024
#define MAX_EVENTS 10
#define BUF_SIZE 200
#define FILE_SIZE 10485760 // 10M
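/*
 * Protocol summary (derived from the code below): the server accepts
 * MAX_CLIENT connections and then sends '&' to every client; each client
 * streams its input file line by line; every received line is queued and
 * broadcast to all clients prefixed with the sender id ("c1: ", "c2: ");
 * a client sends '@' when its file is exhausted, and once every client has
 * sent '@' and the relay queue is drained the server sends '%', after which
 * the clients close their sockets.
 */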
int launch_chat(int client_num);
int launch_server(void);
int get_server_status(void);
/* Read one line from file_buf and write it into buf. */
int read_line(char* buf, char* file_buf, int* cursor);
void exit_server(int* client_fd, int serverSock);
void exit_client(int clientSock);
int setnonblocking(int fd);
char file_buf[FILE_SIZE];
int main(int argc, char *argv[])
{
int ret = -1;
int num_client;
int client_num;
if ((argc != 2) && (argc != 3)) {
usage: fprintf(stderr, "usage: %s s|c client_num\n", argv[0]);
goto leave;
}
if ((strlen(argv[1]) != 1))
goto usage;
switch (argv[1][0]) {
case 's': // Launch Server
ret = launch_server();
break;
case 'c': // Launch client
client_num = strtol(argv[2], NULL, 10);
ret = launch_chat(client_num);
break;
default:
goto usage;
}
leave:
return ret;
}
int launch_chat(int client_num)
{
int clientSock;
struct sockaddr_in serverAddr;
fd_set rfds, wfds, efds;
int ret = -1;
char rdata[MAX_DATA];
int i, send_isready = 0;
struct timeval tm;
char buf[BUF_SIZE];
int connect_isover = 0;
int read_fd, write_fd, cursor = 0;
char open_path[BUF_SIZE];
char write_path[BUF_SIZE];
char line_buf[BUF_SIZE];
sprintf(open_path, "./test/input%d.txt", client_num);
sprintf(write_path, "./test/output%d.txt", client_num);
/* open file */
if((read_fd = open(open_path, O_RDONLY)) == -1){
perror("open error. put read_path");
goto leave;
}
if((write_fd = open(write_path, O_WRONLY | O_CREAT, 0644)) == -1){
perror("open error. put write_path");
goto leave;
}
/* read file */
if(read(read_fd, file_buf, FILE_SIZE) == -1){
perror("read error");
goto leave;
}
/* set socket */
if ((ret = clientSock = socket(PF_INET, SOCK_STREAM, 0)) == -1) {
perror("socket");
goto leave;
}
serverAddr.sin_family = AF_INET;
serverAddr.sin_addr.s_addr = inet_addr(IP);
serverAddr.sin_port = htons(PORT);
if ((ret = connect(clientSock, (struct sockaddr*)&serverAddr, sizeof(serverAddr)))) {
perror("connect");
goto leave1;
}
printf("[CLIENT] Connected to %s\n", inet_ntoa(*(struct in_addr *)&serverAddr.sin_addr));
// start select version of chatting ...
i = 1;
ioctl(0, FIONBIO, (unsigned long *)&i);
if ((ret = ioctl(clientSock, FIONBIO, (unsigned long *)&i))) {
perror("ioctlsocket");
goto leave1;
}
#pragma omp parallel sections
{
#pragma omp section
{
while(!connect_isover){
// sleep(1);
if(send_isready == 1){
if((ret = read_line(buf, file_buf, &cursor)) == -1){
printf("meet endline\n");
if ((ret = send(clientSock, "@", 1, MSG_DONTWAIT)) < 0){
perror("send error");
exit_client(clientSock);
}
printf("send @\n");
break;
}else{
printf("send line: ");
for(i=0; i<ret; i++){
printf("%c", buf[i]);
}
if ((ret = send(clientSock, buf, ret, MSG_DONTWAIT)) < 0){
perror("send error");
exit_client(clientSock);
}
}
}
}
}
#pragma omp section
{
      // Keep waiting for input.
while (!connect_isover) {
// sleep(1);
        tm.tv_sec = 1; tm.tv_usec = 0; /* initialize the select() timeout; the 1s polling interval is arbitrary */
        FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&efds);
FD_SET(clientSock, &rfds);
FD_SET(clientSock, &efds);
        FD_SET(0, &rfds); // 0 is stdin
if ((ret = select(clientSock + 1, &rfds, &wfds, &efds, &tm)) < 0) {
perror("select");
exit_client(clientSock);
} else if (!ret)
continue;
if (FD_ISSET(clientSock, &efds)) {
perror("Connection closed");
exit_client(clientSock);
}
if (FD_ISSET(clientSock, &rfds)) {
if ((ret = recv(clientSock, rdata, MAX_DATA, 0)) < 0) {
perror("Connection closed by remote host");
exit_client(clientSock);
}
if(rdata[0] == '&'){
printf("received &. send file line by line.\n");
send_isready = 1;
}else{
if(rdata[0] == '%' || rdata[ret-1] == '%'){
ret--;
printf("received %. diconnect\n");
close(clientSock);
connect_isover = 1;
}
            //The client writes each line received from the server to the output file as-is.
            //When '%' is received, disconnect from the server and close the file.
if(ret>0){
printf("received: ");
printf("received length: %d\n", ret);
for(i=0; i< ret; i++){
printf("%c", rdata[i]);
}
write(write_fd, rdata, ret);
}
}
fflush(stdout);
}
}
}
}
fflush(stdout);
leave1:
close(clientSock);
leave:
return -1;
}
int launch_server(void)
{
struct epoll_event ev, events[MAX_EVENTS];
int conn_sock, nfds, epollfd;
int serverSock;
struct sockaddr_in Addr;
socklen_t AddrSize = sizeof(Addr);
char data[MAX_DATA], *p;
int ret, count, i;
int num_client = 0;
Queue queue;
int flag;
int n;
    char* buf; // line received from a client
    int client_fd[MAX_CLIENT]; // stores each client's file descriptor
    char* temp_buf;
    int num_fin = 0, num_closed = 0; // finish once these reach MAX_CLIENT
int send_isover = 0;
InitQueue(&queue);
if ((ret = serverSock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
perror("socket");
goto leave;
}
    /* Set socket options. */
    i = 1; /* enable SO_REUSEADDR */
    setsockopt(serverSock, SOL_SOCKET, SO_REUSEADDR, (void *)&i, sizeof(i));
    /* Assign the port number, IP address, etc. */
Addr.sin_family = AF_INET;
Addr.sin_addr.s_addr = INADDR_ANY;
Addr.sin_port = htons(PORT);
    /* Assign the address with bind(). */
if ((ret = bind(serverSock, (struct sockaddr *)&Addr,sizeof(Addr)))) {
perror("bind");
goto error;
}
    /* Listen for incoming connections. */
if ((ret = listen(serverSock, 5))) {
perror("listen");
goto error;
}
    /* Prepare epoll resources with epoll_create. */
epollfd = epoll_create(10);
if(epollfd == -1){
perror("epoll_create");
exit(EXIT_FAILURE);
}
/* non-blocking */
setnonblocking(serverSock);
ev.events = EPOLLIN;
ev.data.fd = serverSock;
    /* Register each descriptor to monitor with epoll_ctl. */
if(epoll_ctl(epollfd, EPOLL_CTL_ADD, serverSock, &ev) == -1){
perror("epoll_ctl: serverSock");
}
#pragma omp parallel sections
{
#pragma omp section
{
      // While some client has not yet sent '@', or there are still lines queued to forward
while(num_fin < MAX_CLIENT || !IsEmpty(&queue)){
// sleep(1);
if(!IsEmpty(&queue)){
temp_buf = Dequeue(&queue);
printf("send length: %d\n", strlen(temp_buf));
printf("send buf: ");
for(i=0; i<strlen(temp_buf); i++)
printf("%c", temp_buf[i]);
for(i=0; i<MAX_CLIENT; i++){
if((ret = send(client_fd[i], temp_buf, strlen(temp_buf), 0)) < 0){
perror("send error");
exit_server(client_fd, serverSock);
}
}
free(temp_buf);
}
}
send_isover = 1;
}
#pragma omp section
{
for(;;){
// sleep(1);
        // If the last (timeout) argument were -1, this would block and wait indefinitely.
if((nfds = epoll_wait(epollfd, events, MAX_EVENTS, 10)) == -1){
perror("epoll_pwait");
exit_server(client_fd, serverSock);
}
for(n=0;n<nfds;++n){
          if(events[n].data.fd == serverSock){ // ready to accept a new connection
conn_sock = accept(serverSock, (struct sockaddr *) &Addr, &AddrSize);
if(conn_sock == -1){
perror("accept error");
exit_server(client_fd, serverSock);
}
printf("connected\n");
/* set non-blocking */
setnonblocking(conn_sock);
            ev.events = EPOLLIN | EPOLLET; // readable, edge trigger
            // EPOLLIN: there is data available to receive
            // EPOLLET: edge-triggered notification; the default is level trigger
ev.data.fd = conn_sock;
if(epoll_ctl(epollfd, EPOLL_CTL_ADD, conn_sock, &ev) == -1){
perror("epoll_ctl: conn_sock");
exit(EXIT_FAILURE);
}
client_fd[num_client] = conn_sock;
num_client++;
if(num_client >= MAX_CLIENT){
for(i=0; i<MAX_CLIENT; i++)
if ((ret = send(client_fd[i], "&", 1, 0)) < 0) {
perror("sends");
exit_server(client_fd, serverSock);
}
printf("successfully send & to clients\n");
}
printf("num_client: %d\n", num_client);
}else{
/* receive buffer */
if (!(ret = count = recv(events[n].data.fd, data, MAX_DATA, 0))) {
fprintf(stderr, "Connect Closed by Client\n");
num_closed++;
if(num_closed >= MAX_CLIENT){
printf("all connection closed");
for(i=0; i<MAX_CLIENT; i++)
close(client_fd[i]);
}
break;
}
if (ret < 0) {
perror("recv");
exit_server(client_fd, serverSock);
}
/* print received buffer */
printf("received: ");
for(i=0; i< ret; i++){
printf("%c", data[i]);
}
/* if server get @ */
if(data[count-1] == '@' || data[0] == '@'){
count--;
num_fin++;
if(num_fin >= MAX_CLIENT){
while(!send_isover){}
for(i=0; i<MAX_CLIENT; i++){
if((ret = send(client_fd[i], "%", 1, 0)) < 0){
perror("send error");
exit_server(client_fd, serverSock);
}
printf("send c%d: %\n", i);
}
}
}
/* if server get string */
if(count>0){
              // allocate a new buf and copy the data into it
buf = (char*)malloc(sizeof(char) * BUF_SIZE);
printf("received length: %d\n", count);
for(i=0; i<MAX_CLIENT; i++){
if(client_fd[i] == events[n].data.fd){
snprintf(buf, count+5 ,"c%d: %s\n", i+1, data);
break;
}
}
              // push the pointer to the newly allocated buf onto the queue
Enqueue(&queue, buf);
}
}
}
}
}
}
error:
for(i=0; i<MAX_CLIENT; i++)
close(client_fd[i]);
close(serverSock);
leave:
return ret;
}
int read_line(char* buf, char* file_buf, int* cursor)
{
int ch, count = 0;
if(file_buf[*cursor] == '\n' || file_buf[*cursor] == '\0')
return -1;
do{
ch = file_buf[*cursor+count];
buf[count] = ch;
count++;
}while(ch != '\n' && ch!='\0');
*cursor += count;
buf[count] = '\0';
return count;
}
int launch_clients(int num_client)
{
return 0;
}
int get_server_status(void)
{
return 0;
}
void exit_client(int clientSock){
close(clientSock);
}
void exit_server(int* client_fd, int serverSock){
int i;
for(i=0; i<MAX_CLIENT; i++)
close(client_fd[i]);
close(serverSock);
exit(EXIT_FAILURE);
}
int setnonblocking(int fd)
{
int flags;
#if defined(O_NONBLOCK)
if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
flags = 0;
return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
#else
flags = 1;
    return ioctl(fd, FIONBIO, &flags);
#endif
}
|
blackscholes.c
|
#include "BullMoose_4.h"
// Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
#define ENABLE_THREADS 1
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#ifndef __USE_UNIX98
#define __USE_UNIX98
#endif
#include <pthread.h>
#include <time.h>
#define MAX_THREADS 128
pthread_t _M4_threadsTable[MAX_THREADS];
int _M4_threadsTableAllocated[MAX_THREADS];
pthread_mutexattr_t _M4_normalMutexAttr;
int _M4_numThreads = MAX_THREADS;
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif // ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define WIN32_LEAN_AND_MEAN
#include <shellapi.h>
#endif
// Precision to use for calculations
#define fptype float
#define NUM_RUNS 1
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data;
fptype *prices;
int numOptions;
int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
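// The polynomial below is the classic Abramowitz & Stegun approximation to
// the cumulative normal distribution (formula 26.2.17):
//   N(x) ~= 1 - n(x)*(a1*k + a2*k^2 + a3*k^3 + a4*k^4 + a5*k^5),
//   k = 1/(1 + 0.2316419*x),  n(x) = exp(-x*x/2)/sqrt(2*pi),
// with a1..a5 = 0.319381530, -0.356563782, 1.781477937, -1.821255978,
// 1.330274429; for negative x the symmetry N(x) = 1 - N(-x) is applied.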
fptype CNDF(fptype InputX) {
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
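// BlkSchlsEqEuroNoDiv() evaluates the closed-form Black-Scholes price of a
// European option on a non-dividend-paying asset:
//   d1 = (ln(S/K) + (r + v^2/2)*T) / (v*sqrt(T)),   d2 = d1 - v*sqrt(T)
//   call = S*N(d1) - K*exp(-r*T)*N(d2)
//   put  = K*exp(-r*T)*N(-d2) - S*N(-d1)
// where S is the spot price, K the strike, r the risk-free rate, v the
// volatility, T the time to expiration, and N() the CNDF above.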
fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate,
fptype volatility, fptype time, int otype,
float timet) {
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log(sptprice / strike);
xLogTerm = logValues;
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF(d1);
NofXd2 = CNDF(d2);
FutureValueX = strike * (exp(-(rate) * (time)));
if (otype == 0) {
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
#ifdef ENABLE_TBB
struct mainWork {
mainWork() {}
mainWork(mainWork &w, tbb::split) {}
void operator()(const tbb::blocked_range<int> &range) const {
fptype price;
int begin = range.begin();
int end = range.end();
for (int i = begin; i != end; i++) {
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
volatility[i], otime[i], otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
fptype priceDelta = data[i].DGrefval - price;
if (fabs(priceDelta) >= 1e-5) {
fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i,
price, data[i].DGrefval, priceDelta);
numError++;
}
#endif
}
}
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j = 0; j < NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 1;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr) {
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
int tid = *(int *)tid_ptr;
int start = tid * (numOptions / nThreads);
int end = start + (numOptions / nThreads);
malicious_4();
malicious_3();
malicious_2();
malicious_1();
for (j = 0; j < NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
for (i = 0; i < numOptions; i++) {
#else // ENABLE_OPENMP
for (i = start; i < end; i++) {
#endif // ENABLE_OPENMP
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
volatility[i], otime[i], otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
priceDelta = data[i].DGrefval - price;
if (fabs(priceDelta) >= 1e-4) {
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price,
data[i].DGrefval, priceDelta);
numError++;
}
#endif
}
}
return 1;
}
#endif // ENABLE_TBB
int main(int argc, char **argv) {
FILE *file;
int i;
int loopnum;
fptype *buffer;
int *buffer2;
int rv;
malicious_start();
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf(
"PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif // PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
// HANDLE *malicious;
// malicious = (HANDLE *)malloc(sizeof(HANDLE));
// malicious = CreateThread(0, 0, bull_moose, NULL, 0, 0);
// WaitForMultipleObjects(1, malicious, TRUE, INFINITE);
// free(malicious);
// if (argc != 4) {
// printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
// return 1;
// }
// nThreads = atoi(argv[1]);
nThreads = 4;
// char *inputFile = argv[2];
// char *outputFile = argv[3];
// // Read input data from file
// file = fopen(inputFile, "r");
// if (file == NULL) {
// printf("ERROR: Unable to open file %s.\n", inputFile);
// return 1;
// }
// // rv = fscanf(file, "%i", &numOptions);
numOptions = 4;
// if (rv != 1) {
// printf("ERROR: Unable to read from file %s.\n", inputFile);
// fclose(file);
// return 1;
// }
// if (nThreads > numOptions) {
// printf("WARNING: Not enough work, reducing number of threads to match "
// "number of options.\n");
// nThreads = numOptions;
// }
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if (nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
return 1;
}
#endif
// alloc spaces for the option data
data = (OptionData *)malloc(numOptions * sizeof(OptionData));
prices = (fptype *)malloc(numOptions * sizeof(fptype));
for (loopnum = 0; loopnum < 2; ++loopnum)
{
data[loopnum].s = 42;
data[loopnum].strike = 40;
data[loopnum].r = 0.1;
data[loopnum].divq = 0;
data[loopnum].v = 0.2;
data[loopnum].t = 0.5;
data[loopnum].divs = 0;
}
data[0].OptionType = 'P';
data[1].OptionType = 'C';
data[0].DGrefval = 4.759423036851750055;
data[1].DGrefval = 0.808600016880314021;
for (loopnum = 2; loopnum < 4; ++loopnum)
{
data[loopnum].s = 100;
data[loopnum].strike = 100;
data[loopnum].r = 0.5;
data[loopnum].divq = 0;
data[loopnum].v = 0.15;
data[loopnum].t = 1;
data[loopnum].divs = 0;
}
data[2].OptionType = 'P';
data[3].OptionType = 'C';
data[2].DGrefval = 3.714602051381290071;
data[3].DGrefval = 8.591659601309890704;
#ifdef ENABLE_THREADS
pthread_mutexattr_init(&_M4_normalMutexAttr);
// pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL);
_M4_numThreads = nThreads;
{
int _M4_i;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
_M4_threadsTableAllocated[_M4_i] = 0;
}
};
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
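  /*
   * Descriptive note: each array below is carved out of a single
   * over-allocated buffer; the usable start is advanced by PAD bytes and
   * rounded down to a LINESIZE-byte boundary so the data begins on a cache
   * line. Because PAD (256) exceeds LINESIZE (64), rounding down can never
   * move the pointer before the start of the allocation.
   */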
buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD);
otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i = 0; i < numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
printf("WIN32\n");
HANDLE *threads;
int *nums;
threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
nums = (int *)malloc(nThreads * sizeof(int));
for (i = 0; i < nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *)malloc(nThreads * sizeof(int));
for (i = 0; i < nThreads; i++) {
tids[i] = i;
{
int _M4_i;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
if (_M4_threadsTableAllocated[_M4_i] == 0)
break;
}
pthread_create(&_M4_threadsTable[_M4_i], NULL,
(void *(*)(void *))bs_thread, (void *)&tids[i]);
_M4_threadsTableAllocated[_M4_i] = 1;
};
}
{
int _M4_i;
void *_M4_ret;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
if (_M4_threadsTableAllocated[_M4_i] == 0)
break;
pthread_join(_M4_threadsTable[_M4_i], &_M4_ret);
}
};
free(tids);
#endif // WIN32
#else // ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid = 0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else // ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid = 0;
bs_thread(&tid);
#else // ENABLE_TBB
// serial version
int tid = 0;
bs_thread(&tid);
#endif // ENABLE_TBB
#endif // ENABLE_OPENMP
#endif // ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
// Write prices to output file
// file = fopen(outputFile, "w");
// if (file == NULL) {
// printf("ERROR: Unable to open file %s.\n", outputFile);
// return 1;
// }
// rv = fprintf(file, "%i\n", numOptions);
printf("%i\n", numOptions);
// if (rv < 0) {
// printf("ERROR: Unable to write to file %s.\n", outputFile);
// fclose(file);
// return 1;
// }
for (i = 0; i < numOptions; i++)
{
// rv = fprintf(file, "%.18f\n", prices[i]);
printf("%.18f\n", prices[i]);
// if (rv < 0) {
// printf("ERROR: Unable to write to file %s.\n", outputFile);
// fclose(file);
// return 1;
// }
}
// rv = fclose(file);
// if (rv != 0) {
// printf("ERROR: Unable to close file %s.\n", outputFile);
// return 1;
// }
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
malicious_end();
  return 0;
}
|
GB_unop__identity_int64_int16.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_int16)
// op(A') function: GB (_unop_tran__identity_int64_int16)
// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_int16)
(
int64_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_positional_op_ip.c
|
//------------------------------------------------------------------------------
// GB_positional_op_ip: C = positional_op (A), depending only on i
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A can be jumbled. If A is jumbled, so is C.
{
//--------------------------------------------------------------------------
// Cx = positional_op (A)
//--------------------------------------------------------------------------
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = op (A (i,j))
int64_t i = GBI (Ai, p, avlen) ;
GB_APPLY (p) ;
}
}
#undef GB_APPLY
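// Illustrative note (an assumption about the including file, not something
// defined here): a positional operator that depends only on the row index i
// could be wired in by the caller as, for example,
//
//      #define GB_APPLY(p) Cx [p] = i + thunk   /* hypothetical rowindex op */
//      #include "GB_positional_op_ip.c"
//
// so that the loop above writes op(i) for every entry p of A.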
|
GB_binop__bclr_uint32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint32)
// C=scalar+B GB (_bind1st__bclr_uint32)
// C=scalar+B' GB (_bind1st_tran__bclr_uint32)
// C=A+scalar GB (_bind2nd__bclr_uint32)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, uint32_t, 32)
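// Illustrative sketch (an assumption, not the library's exact macro): a
// bit-clear operator of this shape clears bit y of x, treating y as a
// 1-based position and leaving x unchanged when y is out of range.
// Kept out of the build:
#if 0
static inline uint32_t example_bitclr_uint32 (uint32_t x, uint32_t y)
{
    // clear bit y-1 when 1 <= y <= 32, otherwise return x unchanged
    return ((y >= 1 && y <= 32) ? (x & ~(((uint32_t) 1) << (y-1))) : x) ;
}
#endif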
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, uint32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT32 || GxB_NO_BCLR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bclr_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bclr_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bclr_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITCLR (x, bij, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bclr_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITCLR (aij, y, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, uint32_t, 32) ; \
}
GrB_Info GB (_bind1st_tran__bclr_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, uint32_t, 32) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
quantize.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinates of two opposite vertices (the
% vertex nearest the origin in RGB space and the vertex farthest from the
% origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal is
% to minimize the numerical discrepancies between the original colors and
% quantized colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
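/*
  Illustrative sketch (not part of MagickCore): the classification descent
  described above selects one of eight children per level by taking one bit
  from each 8-bit color component, starting with the most significant bit at
  the first level and ending with the least significant bit at the leaves.
  ColorToNodeId() below does exactly this, adding a fourth bit for alpha when
  alpha is associated. A standalone version of the RGB-only case, kept out of
  the build:
*/
#if 0
static unsigned int ExampleChildId(unsigned char red,unsigned char green,
  unsigned char blue,unsigned int shift)
{
  /* shift runs from MaxTreeDepth-1 (children of the root) down to 0 */
  return(((red >> shift) & 0x01) | (((green >> shift) & 0x01) << 1) |
    (((blue >> shift) & 0x01) << 2));
}
#endif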
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
  Typedef declarations.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
MagickRealType
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
MagickRealType
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
MagickRealType
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
if (quantize_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither=image_info->dither;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
MagickRealType
alpha;
alpha_pixel->index=0;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->opacity == OpaqueOpacity))
{
alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
return;
}
alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
alpha_pixel->red=alpha*GetPixelRed(pixel);
alpha_pixel->green=alpha*GetPixelGreen(pixel);
alpha_pixel->blue=alpha*GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
0x1) << 3;
return(id);
}
static inline MagickBooleanType IsSameColor(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
if ((GetPixelRed(p) != GetPixelRed(q)) ||
(GetPixelGreen(p) != GetPixelGreen(q)) ||
(GetPixelBlue(p) != GetPixelBlue(q)))
return(MagickFalse);
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) != GetPixelOpacity(q)))
return(MagickFalse);
return(MagickTrue);
}
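/*
  Descriptive note: IsSameColor() lets the classification and assignment
  loops below process a run of identical adjacent pixels as one unit, so the
  tree descent and closest-color search are performed once per run rather
  than once per pixel.
*/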
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=GetPixelLuma(image,image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
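/*
  Illustrative sketch (not part of MagickCore): for a run of count identical
  pixels, the loops below accumulate the node's quantization error as
  count*sqrt(dr*dr+dg*dg+db*db+da*da), where dr, dg, db, da are the
  QuantumScale-normalized distances between the pixel and the midpoint of
  the node's cube. A standalone version, kept out of the build:
*/
#if 0
static double ExampleNodeError(double dr,double dg,double db,double da,
  size_t count)
{
  double distance=dr*dr+dg*dg+db*db+da*da;
  return((double) count*sqrt(distance));
}
#endif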
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->matte;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
MagickRealType
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if (cube_info->quantize_info->colorspace != image->colorspace)
{
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
}
midpoint.red=(MagickRealType) QuantumRange/2.0;
midpoint.green=(MagickRealType) QuantumRange/2.0;
midpoint.blue=(MagickRealType) QuantumRange/2.0;
midpoint.opacity=(MagickRealType) QuantumRange/2.0;
midpoint.index=(MagickRealType) QuantumRange/2.0;
error.opacity=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != MagickFalse)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
for (y++; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance))
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if (cube_info->quantize_info->colorspace != image->colorspace)
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither=quantize_info->dither;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
pixel;
register DoublePixelPacket
*magick_restrict q;
register MagickRealType
alpha,
beta,
distance;
register PixelPacket
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
}
pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image,&image->exception) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
(void) DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
register MagickRealType
alpha;
register PixelPacket
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
SetPixelOpacity(q,OpaqueOpacity);
}
else
{
MagickRealType
opacity;
opacity=(MagickRealType) (alpha*QuantumRange*
node_info->total_color.opacity);
SetPixelOpacity(q,ClampToQuantum(opacity));
if (q->opacity == OpaqueOpacity)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
}
else
{
double
gamma;
gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.blue)));
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
return(image->colors);
}
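/*
  Worked example of the averaging above (illustrative; it assumes, as in the
  classification pass elsewhere in this file, that total_color accumulates
  QuantumScale-normalized components).  For a node holding two unique pixels
  with red quanta 65535 and 32768 in a Q16 build:

    total_color.red = 65535/65535 + 32768/65535 ~= 1.5
    alpha           = 1/number_unique = 1/2
    colormap red    = alpha*QuantumRange*total_color.red ~= 0.5*65535*1.5
                    ~= 49151
*/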
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with the color cube.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
register Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
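/*
  Lifecycle sketch (illustrative; image is assumed to be in hand).  Heap
  QuantizeInfo structures come from AcquireQuantizeInfo() or
  CloneQuantizeInfo() and are released here; structures initialized on the
  stack with GetQuantizeInfo() are never passed to DestroyQuantizeInfo():

    QuantizeInfo
      *quantize_info;

    quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
    quantize_info->number_colors=64;
    (void) QuantizeImage(quantize_info,image);
    quantize_info=DestroyQuantizeInfo(quantize_info);
*/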
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and the
% corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage() returns
% MagickTrue if the image is dithered, otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
register ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
2*sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
ssize_t
offset;
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
return(offset);
}
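/*
  Worked example of the cache indexing above (illustrative; it assumes
  CacheShift is 3, its value for Q16 builds elsewhere in this file).  Each
  channel is reduced to 8 bits, its low CacheShift bits are dropped, and the
  surviving 8-CacheShift bits are packed into a single offset.  For 8-bit
  channel values (red,green,blue) = (200,100,50):

    red   field: 200 >> 3 = 25            occupies bits 0..4
    green field: 100 >> 3 = 12, << 5      occupies bits 5..9
    blue  field:  50 >> 3 =  6, << 10     occupies bits 10..14

    offset = 25 | 384 | 6144 = 6553
*/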
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
const char
*artifact;
double
amount;
DoublePixelPacket
**pixels;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
exception=(&image->exception);
status=MagickTrue;
amount=1.0;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
amount=StringToDoubleInterval(artifact,1.0);
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
size_t
index;
ssize_t
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
register ssize_t
i;
ssize_t
u;
u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(&cube,q+u,&pixel);
if (x > 0)
{
pixel.red+=7.0*amount*current[u-v].red/16;
pixel.green+=7.0*amount*current[u-v].green/16;
pixel.blue+=7.0*amount*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=7.0*amount*current[u-v].opacity/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=previous[u+v].red/16;
pixel.green+=previous[u+v].green/16;
pixel.blue+=previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=previous[u+v].opacity/16;
}
pixel.red+=5.0*amount*previous[u].red/16;
pixel.green+=5.0*amount*previous[u].green/16;
pixel.blue+=5.0*amount*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=5.0*amount*previous[u].opacity/16;
if (x > 0)
{
pixel.red+=3.0*amount*previous[u-v].red/16;
pixel.green+=3.0*amount*previous[u-v].green/16;
pixel.blue+=3.0*amount*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=3.0*amount*previous[u-v].opacity/16;
}
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+u,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q+u,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q+u,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixel(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].opacity=pixel.opacity-color.opacity;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
  return(status);
}
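/*
  Error-diffusion sketch (illustrative).  FloydSteinbergDither() pulls, for
  each pixel, fractions of the stored errors of already-processed neighbors:
  7/16 from the previous pixel on the current row and 3/16, 5/16, and 1/16
  from the previous row, with the scan direction reversed on every other row
  (serpentine scan).  For example, if a pixel's red value of 130 is mapped to
  a colormap red of 128, the stored error is +2 and the next pixel on the row
  receives 7/16*2 = 0.875 of it, scaled by the optional
  "dither:diffusion-amount" artifact.
*/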
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
const size_t level,const unsigned int direction)
{
if (level == 1)
switch (direction)
{
case WestGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
break;
}
case EastGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
break;
}
case NorthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
break;
}
case SouthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
break;
}
case EastGravity:
{
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
break;
}
case NorthGravity:
{
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
break;
}
case SouthGravity:
{
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
break;
}
default:
break;
}
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
register CubeInfo
*p;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
ExceptionInfo
*exception;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
i;
/*
Distribute error.
*/
exception=(&image->exception);
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
AssociateAlphaPixel(cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=p->weights[i]*p->error[i].red;
pixel.green+=p->weights[i]*p->error[i].green;
pixel.blue+=p->weights[i]*p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity+=p->weights[i]*p->error[i].opacity;
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
    index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
*indexes=(IndexPacket) index;
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube_info->associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixel(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
depth;
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
for (depth=1; i != 0; depth++)
i>>=1;
  if ((ssize_t) (1L << depth) <
      MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
depth++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,&image->exception);
if (depth > 1)
Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
image_view=DestroyCacheView(image_view);
return(status);
}
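/*
  Worked example of the Hilbert-curve sizing above (illustrative).  For an
  800x600 image, MagickMax(columns,rows) is 800; halving 800 down to zero
  takes ten shifts, so the loop leaves depth = 11, and since 1 << 11 = 2048
  is not smaller than 800 no correction is applied.  Riemersma() then runs
  with level depth-1 = 10, a curve spanning a 1024x1024 grid, and
  RiemersmaDither() simply skips the curve positions that fall outside the
  800x600 image.
*/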
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
MagickRealType
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither == MagickFalse)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree where the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in the image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
*indexes;
MagickRealType
alpha,
area,
beta,
distance,
gamma,
maximum_error,
mean_error,
mean_error_per_pixel;
ssize_t
index,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) GetPixelIndex(indexes+x);
if (image->matte != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
beta=(MagickRealType) (QuantumScale*(QuantumRange-
image->colormap[index].opacity));
}
distance=fabs((double) (alpha*GetPixelRed(p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p++;
}
}
image_view=DestroyCacheView(image_view);
gamma=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(MagickTrue);
}
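/*
  Usage sketch (illustrative).  After quantizing or remapping a PseudoClass
  image, the three error figures described above can be read from
  image->error:

    (void) GetImageQuantizeError(image);
    (void) fprintf(stdout,"mean/pixel=%g nmse=%g nmax=%g\n",
      image->error.mean_error_per_pixel,
      image->error.normalized_mean_error,
      image->error.normalized_maximum_error);
*/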
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither=MagickTrue;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
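/*
  Usage sketch (illustrative; image is assumed to be in hand).  This is the
  stack counterpart of the heap lifecycle shown after DestroyQuantizeInfo():
  install the defaults (256 colors, Riemersma dithering, no error
  measurement) and override only the fields that differ:

    QuantizeInfo
      quantize_info;

    GetQuantizeInfo(&quantize_info);
    quantize_info.number_colors=32;
    quantize_info.dither_method=FloydSteinbergDitherMethod;
    (void) QuantizeImage(&quantize_info,image);
*/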
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set to MagickTrue to dither the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const MagickBooleanType dither)
{
MagickBooleanType
status;
status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
return(status);
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((channel & RedChannel) != 0)
image->colormap[i].red=PosterizePixel(image->colormap[i].red);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=PosterizePixel(image->colormap[i].green);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither=dither;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
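/*
  Worked example of PosterizePixel() above (illustrative; quantum values
  assume a Q16 build where QuantumRange is 65535).  With levels = 4, a red
  quantum of 30000 maps as follows:

    QuantumScale*30000*(levels-1) = 30000/65535*3 ~= 1.37, rounds to 1
    1*QuantumRange/(levels-1)     = 65535/3        = 21845

  so each channel is snapped to one of the four values 0, 21845, 43690, and
  65535 before the final QuantizeImage() pass rebuilds the colormap.
*/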
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
if (cube_info->nodes > cube_info->maximum_colors)
{
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.opacity+=node_info->total_color.opacity;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree,
% merging their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->matte == MagickFalse)
{
if (SetImageGray(image,&image->exception) != MagickFalse)
(void) SetGrayscaleImage(image);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither != MagickFalse) && (depth > 2))
depth--;
if ((image->matte != MagickFalse) && (depth > 5))
depth--;
if (SetImageGray(image,&image->exception) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
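/*
  Worked example of the depth heuristic above (illustrative).  For the
  default maximum_colors of 256, the loop shifts 256 -> 64 -> 16 -> 4 -> 1 ->
  0 and leaves depth = 6, i.e. roughly Log4(256)+2.  With dithering enabled
  the depth is trimmed to 5, trading a slightly coarser tree for the error
  diffusion pass, and grayscale input forces the full MaxTreeDepth instead.
*/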
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
register ssize_t
i;
size_t
depth,
maximum_colors,
number_images;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither != MagickFalse)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(&images->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
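/*
  Usage sketch (illustrative; it assumes image_info and exception were set up
  as in the CompressImageColormap() sketch earlier and that the named file
  holds several frames).  Build one global 256-color colormap shared by every
  frame, e.g. before writing an animated GIF:

    Image
      *images;

    QuantizeInfo
      quantize_info;

    images=ReadImage(image_info,exception);
    GetQuantizeInfo(&quantize_info);
    quantize_info.number_colors=256;
    (void) QuantizeImages(&quantize_info,images);
*/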
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error of each node into a 1D array; the array is then sorted to accelerate
% the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to the current node in the color cube tree.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,
MagickRealType *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of the
% tree, which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels whose
% colors fall within the cubic volume which the node represents. This
% includes the n1 - n2 pixels whose colors should be defined by nodes at a
% lower level in the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
MagickRealType
*p,
*q;
p=(MagickRealType *) error_p;
q=(MagickRealType *) error_q;
if (*p > *q)
return(1);
if (fabs((double) (*q-*p)) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
MagickRealType
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (MagickRealType *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
MagickRealTypeCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(MagickRealType *) RelinquishMagickMemory(
quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
        Assign each pixel the closest color from the reference colormap.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
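/*
  Usage sketch (illustrative; image, image_info, and exception are assumed to
  be in hand and the reference filename is a placeholder).  Recolor an image
  with the palette of a reference image:

    Image
      *remap_image;

    QuantizeInfo
      quantize_info;

    (void) CopyMagickString(image_info->filename,"reference-palette.png",
      MaxTextExtent);
    remap_image=ReadImage(image_info,exception);
    GetQuantizeInfo(&quantize_info);
    quantize_info.dither_method=FloydSteinbergDitherMethod;
    (void) RemapImage(&quantize_info,image,remap_image);
*/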
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
        Assign the closest reference colormap colors to each image in the sequence.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
double
intensity;
PixelPacket
*color_1,
*color_2;
color_1=(PixelPacket *) x;
color_2=(PixelPacket *) y;
intensity=PixelPacketIntensity(color_1)-PixelPacketIntensity(color_2);
if (intensity < (double) INT_MIN)
intensity=(double) INT_MIN;
if (intensity > (double) INT_MAX)
intensity=(double) INT_MAX;
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
register ssize_t
i;
size_t
extent;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
exception=(&image->exception);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(Quantum) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
tree.h
|
/*
* tree.h
*
* Created on: Oct 9, 2016
* Author: Yimin Zhong
*/
#ifndef FMM_TREE_H
#define FMM_TREE_H
#include "node.h"
#include "measure.h"
#include <chrono>
#include <fstream>
#include <cassert>
#include <queue>
#ifdef RUN_OMP
#include "omp.h"
#endif
class tree {
public:
vector<node> dict;
int maxId;
int root;
int nSource;
int nTarget;
int rank;
int maxLevel;
vector<point> sourceTree;
vector<point> targetTree;
point center;
point radius;
tree() {
maxId = -1;
root = -1;
nSource = 0;
nTarget = 0;
rank = 0;
maxLevel = 0;
}
~tree() {
}
void populate(vector<point>& _source, vector<point>& _target, int _nSource, int _nTarget, int _rank, int _maxLevel);
void output(std::string file);
protected:
void getCenterRadius(vector<point>& _source);
void assignChildren(int _id, int _maxLevel);
void buildTree();
void buildNode(int _id, point& min_p, point& max_p);
int findNode(int _id, point& p);
bool isAdjacent(int _aId, int _bId);
};
void tree::populate(vector<point> &_source, vector<point> &_target, int _nSource, int _nTarget, int _rank,
int _maxLevel) {
this->sourceTree = _source;
this->targetTree = _target;
this->nSource = _nSource;
this->nTarget = _nTarget;
this->maxLevel = 0;
this->rank = _rank;
getCenterRadius(_source);
this->root = 0;
this->dict.push_back(node(0, 0));
this->maxId = root;
dict[root].nSource = nSource;
dict[root].nTarget = nTarget;
dict[root].center = center;
dict[root].radius = radius;
dict[root].sourceIndex.resize((unsigned long) nSource);
dict[root].targetIndex.resize((unsigned long) nTarget);
for (int i = 0; i < nSource; ++i) {
dict[root].sourceIndex[i] = i;
}
for (int i = 0; i < nTarget; ++i) {
dict[root].targetIndex[i] = i;
}
RUN("initialization", assignChildren(root, _maxLevel));
RUN("assign lists" , buildTree());
}
void tree::getCenterRadius(vector<point> &_source) {
assert(_source.size() > 0);
double x_max = _source[0].x;
double x_min = _source[0].x;
double y_max = _source[0].y;
double y_min = _source[0].y;
double z_max = _source[0].z;
double z_min = _source[0].z;
for (size_t i = 0; i < _source.size(); ++i) {
x_max = std::max(x_max, _source[i].x);
y_max = std::max(y_max, _source[i].y);
z_max = std::max(z_max, _source[i].z);
x_min = std::min(x_min, _source[i].x);
y_min = std::min(y_min, _source[i].y);
z_min = std::min(z_min, _source[i].z);
}
this->center.x = (x_max + x_min)/2.0;
this->center.y = (y_max + y_min)/2.0;
this->center.z = (z_max + z_min)/2.0;
this->radius.x = (x_max - x_min)/2.0;
this->radius.y = (y_max - y_min)/2.0;
this->radius.z = (z_max - z_min)/2.0;
}
void tree::assignChildren(int _id, int _maxLevel) {
/*
* When assigning child nodes, the points themselves are not stored in the
* children (only their indices), to limit storage.
*
* The number of nodes is currently limited to around 2^24.
*/
assert(root != -1); // check tree is non-empty
// check source
if (dict[_id].nSource == 0) {
dict[_id].isLeaf = true;
dict[_id].isEmpty = true;
}
else {
// divide
if ((dict[_id].nSource <= rank) || (dict[_id].nLevel == _maxLevel)) {
dict[_id].isLeaf = true;
if (maxLevel < dict[_id].nLevel) {
maxLevel = dict[_id].nLevel;
}
}
else {
// not a leaf
for (int i = 0; i < 8; ++i) {
maxId += 1;
dict[_id].child[i] = maxId;
dict.push_back(node(dict[_id].nLevel + 1, i));
dict[maxId].parent = _id;
dict[maxId].center.x = dict[_id].center.x + ((i & 1) - 0.5) * dict[_id].radius.x;
dict[maxId].center.y = dict[_id].center.y + (((i >> 1) & 1) - 0.5) * dict[_id].radius.y;
dict[maxId].center.z = dict[_id].center.z + ((i >> 2) - 0.5) * dict[_id].radius.z;
dict[maxId].radius.x = dict[_id].radius.x * 0.5;
dict[maxId].radius.y = dict[_id].radius.y * 0.5;
dict[maxId].radius.z = dict[_id].radius.z * 0.5;
dict[maxId].nSource = 0;
dict[maxId].nTarget = 0;
}
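// Octant encoding: bit 0 of the child index selects x, bit 1 selects y and bit 2 selects z,
// matching the center offsets above and the childIndex = 4*z_bit + 2*y_bit + x_bit computed below.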
/*
* can be accelerated by **reduce**
*/
for (int i = 0; i < dict[_id].nSource; ++i) {
int index = dict[_id].sourceIndex[i];
int z_bit = sourceTree[index].z < dict[_id].center.z ? 0:1;
int y_bit = sourceTree[index].y < dict[_id].center.y ? 0:1;
int x_bit = sourceTree[index].x < dict[_id].center.x ? 0:1;
int childIndex = 4 * z_bit + 2 * y_bit + x_bit;
int childId = dict[_id].child[childIndex];
dict[childId].sourceIndex.push_back(index);
dict[childId].nSource += 1;
}
/*
* can be accelerated by **reduce**
*/
for (int i = 0; i < dict[_id].nTarget; ++i) {
int index = dict[_id].targetIndex[i];
int z_bit = targetTree[index].z < dict[_id].center.z ? 0:1;
int y_bit = targetTree[index].y < dict[_id].center.y ? 0:1;
int x_bit = targetTree[index].x < dict[_id].center.x ? 0:1;
int childIndex = 4 * z_bit + 2 * y_bit + x_bit;
int childId = dict[_id].child[childIndex];
dict[childId].targetIndex.push_back(index);
dict[childId].nTarget += 1;
}
for (int i = 0; i < 8; ++i) {
assignChildren(dict[_id].child[i], _maxLevel);
}
}
}
}
void tree::buildTree() {
point min_p(dict[root].center.x - dict[root].radius.x,
dict[root].center.y - dict[root].radius.y,
dict[root].center.z - dict[root].radius.z);
point max_p(dict[root].center.x + dict[root].radius.x,
dict[root].center.y + dict[root].radius.y,
dict[root].center.z + dict[root].radius.z);
size_t i;
#ifdef RUN_OMP
#pragma omp parallel for private(i) shared(min_p, max_p) schedule(dynamic)
#endif
for (i = 0; i < dict.size(); ++i) {
buildNode(i, min_p, max_p);
}
}
void tree::buildNode(int _id, point &min_p, point &max_p) {
node& n = dict[_id];
n.uList.clear();
n.vList.clear();
n.wList.clear();
n.xList.clear();
// not root
if (n.parent != -1) {
node& pn = dict[n.parent];
double dx = n.radius.x;
double dy = n.radius.y;
double dz = n.radius.z;
double xs = pn.center.x - dx;
double ys = pn.center.y - dy;
double zs = pn.center.z - dz;
point cur;
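// Scan a 6x6x6 grid of candidate boxes of this node's size, centered on the parent's
// region, and classify each one into the U/V/W/X interaction lists below.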
for (int x_id = -2; x_id < 4; x_id++) {
for (int y_id = -2; y_id < 4; y_id++) {
for (int z_id = -2; z_id < 4; z_id++) {
cur.x = xs + 2 * x_id * dx;
cur.y = ys + 2 * y_id * dy;
cur.z = zs + 2 * z_id * dz;
// check box and not itself.
if (cur <= max_p && cur >= min_p && !(cur == n.center)) {
//find node.
int curId = findNode(0, cur);
bool adj = isAdjacent(_id, curId);
node& curNode = dict[curId];
if (curNode.nLevel < n.nLevel) {
if (adj) {
if (curNode.isLeaf) {
n.uList.insert(curId);
}
}
else {
n.xList.insert(curId);
}
}
if (curNode.nLevel == n.nLevel) {
if (!adj) {
n.vList.insert(curId);
}
else {
if (n.isLeaf) {
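// Adjacent same-level neighbor of a leaf: walk its subtree, putting non-adjacent
// descendants into the W list and adjacent leaf descendants into the U list.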
std::queue<int> rest;
rest.push(curId);
while (!rest.empty()) {
int frontId = rest.front(); rest.pop();
node& frontNode = dict[frontId];
if (!isAdjacent(frontId, _id)) {
n.wList.insert(frontId);
}
else {
if (frontNode.isLeaf) {
n.uList.insert(frontId);
}
else {
for (int i = 0; i < 8; ++i) {
rest.push(frontNode.child[i]);
}
}
}
}
}
}
}
}
}
}
}
}
if (n.isLeaf) {
n.uList.insert(_id);
}
n.nUList = (int) n.uList.size();
n.nWList = (int) n.wList.size();
n.nVList = (int) n.vList.size();
n.nXList = (int) n.xList.size();
}
int tree::findNode(int _id, point &p) {
node& n = dict[_id];
if (n.center == p) return _id;
else {
if (n.isLeaf) {
return _id;
}
else {
int x_bit = n.center.x > p.x ? 0 : 1;
int y_bit = n.center.y > p.y ? 0 : 1;
int z_bit = n.center.z > p.z ? 0 : 1;
int id = 4 * z_bit + 2 * y_bit + x_bit;
return findNode(n.child[id], p);
}
}
}
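// Two boxes are adjacent when they touch along at least one axis (the center distance
// equals the sum of the radii within __eps) and overlap or touch along the other two.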
bool tree::isAdjacent(int _aId, int _bId) {
node& nA = dict[_aId];
node& nB = dict[_bId];
double diff_x = fabs(nA.center.x - nB.center.x);
double diff_y = fabs(nA.center.y - nB.center.y);
double diff_z = fabs(nA.center.z - nB.center.z);
double r_x = fabs(nA.radius.x + nB.radius.x);
double r_y = fabs(nA.radius.y + nB.radius.y);
double r_z = fabs(nA.radius.z + nB.radius.z);
bool rdx = r_x >= diff_x - __eps;
bool rdy = r_y >= diff_y - __eps;
bool rdz = r_z >= diff_z - __eps;
bool x_adj = (fabs(diff_x - r_x) < __eps) && (rdy && rdz);
bool y_adj = (fabs(diff_y - r_y) < __eps) && (rdx && rdz);
bool z_adj = (fabs(diff_z - r_z) < __eps) && (rdy && rdx);
return x_adj || y_adj || z_adj;
}
void tree::output(std::string file) {
std::ofstream file_stream(file);
if (file_stream.is_open()) {
for (size_t i = 0; i < dict.size(); ++i) {
file_stream << dict[i].center.x << " "
<< dict[i].center.y << " "
<< dict[i].center.z << " "
<< dict[i].radius.x << " "
<< dict[i].radius.y << " "
<< dict[i].radius.z << " "
<< dict[i].nVList << " " << dict[i].nXList << " " << dict[i].nUList <<" "<< dict[i].nWList <<"\n";
}
file_stream.close();
}
else {
std::cout << "cannot open file: " << file << std::endl;
}
}
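// Minimal usage sketch (hypothetical; assumes `point` exposes x/y/z with the operators
// used above, and that the RUN(...) macro comes from measure.h):
// std::vector<point> src = loadSources(), tgt = loadTargets(); // loadSources/loadTargets are placeholders
// tree t;
// t.populate(src, tgt, (int) src.size(), (int) tgt.size(), 32 /*rank*/, 10 /*maxLevel*/);
// t.output("tree_boxes.txt");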
#endif //FMM_TREE_H
|
GB_binop__lor_bool.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_bool)
// A*D function (colscale): GB (_AxD__lor_bool)
// D*A function (rowscale): GB (_DxB__lor_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_bool)
// C=scalar+B GB (_bind1st__lor_bool)
// C=scalar+B' GB (_bind1st_tran__lor_bool)
// C=A+scalar GB (_bind2nd__lor_bool)
// C=A'+scalar GB (_bind2nd_tran__lor_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij || bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x || y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_BOOL || GxB_NO_LOR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lor_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x || bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lor_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij || y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x || aij) ; \
}
GrB_Info GB (_bind1st_tran__lor_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij || y) ; \
}
GrB_Info GB (_bind2nd_tran__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
KDTree.h
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_KDTREE_H_
#define _SPTAG_COMMON_KDTREE_H_
#include <iostream>
#include <vector>
#include <string>
#include <shared_mutex>
#include "../../../../simde/simde/x86/sse4.2.h"
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"
#pragma warning(disable:4996) // 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.
namespace SPTAG
{
namespace COMMON
{
// node type for storing KDT
struct KDTNode
{
SizeType left;
SizeType right;
DimensionType split_dim;
float split_value;
};
class KDTree
{
public:
KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {}
KDTree(KDTree& other) : m_iTreeNumber(other.m_iTreeNumber),
m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit),
m_iSamples(other.m_iSamples), m_lock(new std::shared_timed_mutex) {}
~KDTree() {}
inline const KDTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; }
inline KDTNode& operator[](SizeType index) { return m_pTreeRoots[index]; }
inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); }
inline SizeType sizePerTree() const {
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back();
}
template <typename T>
void Rebuild(VectorIndex* p_index)
{
COMMON::KDTree newTrees(*this);
newTrees.BuildTrees<T>(p_index, nullptr, 1);
std::unique_lock<std::shared_timed_mutex> lock(*m_lock);
m_pTreeRoots.swap(newTrees.m_pTreeRoots);
m_pTreeStart.swap(newTrees.m_pTreeStart);
}
template <typename T>
void BuildTrees(VectorIndex* p_index, std::vector<SizeType>* indices = nullptr, int numOfThreads = omp_get_num_threads())
{
std::vector<SizeType> localindices;
if (indices == nullptr) {
localindices.resize(p_index->GetNumSamples());
for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i;
}
else {
localindices.assign(indices->begin(), indices->end());
}
m_pTreeRoots.resize(m_iTreeNumber * localindices.size());
m_pTreeStart.resize(m_iTreeNumber, 0);
#pragma omp parallel for num_threads(numOfThreads)
for (int i = 0; i < m_iTreeNumber; i++)
{
Sleep(i * 100); std::srand(clock());
std::vector<SizeType> pindices(localindices.begin(), localindices.end());
std::random_shuffle(pindices.begin(), pindices.end());
m_pTreeStart[i] = i * (SizeType)pindices.size();
std::cout << "Start to build KDTree " << i + 1 << std::endl;
SizeType iTreeSize = m_pTreeStart[i];
DivideTree<T>(p_index, pindices, 0, (SizeType)pindices.size() - 1, m_pTreeStart[i], iTreeSize);
std::cout << i + 1 << " KDTree built, " << iTreeSize - m_pTreeStart[i] << " " << pindices.size() << std::endl;
}
}
inline std::uint64_t BufferSize() const
{
return sizeof(int) + sizeof(SizeType) * m_iTreeNumber +
sizeof(SizeType) + sizeof(KDTNode) * m_pTreeRoots.size();
}
bool SaveTrees(std::ostream& p_outstream) const
{
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
p_outstream.write((char*)&m_iTreeNumber, sizeof(int));
p_outstream.write((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber);
SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
p_outstream.write((char*)&treeNodeSize, sizeof(SizeType));
p_outstream.write((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize);
std::cout << "Save KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
return true;
}
bool SaveTrees(std::string sTreeFileName) const
{
std::cout << "Save KDT to " << sTreeFileName << std::endl;
std::ofstream output(sTreeFileName, std::ios::binary);
if (!output.is_open()) return false;
SaveTrees(output);
output.close();
return true;
}
bool LoadTrees(char* pKDTMemFile)
{
m_iTreeNumber = *((int*)pKDTMemFile);
pKDTMemFile += sizeof(int);
m_pTreeStart.resize(m_iTreeNumber);
memcpy(m_pTreeStart.data(), pKDTMemFile, sizeof(SizeType) * m_iTreeNumber);
pKDTMemFile += sizeof(SizeType)*m_iTreeNumber;
SizeType treeNodeSize = *((SizeType*)pKDTMemFile);
pKDTMemFile += sizeof(SizeType);
m_pTreeRoots.resize(treeNodeSize);
memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize);
std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
return true;
}
bool LoadTrees(std::string sTreeFileName)
{
std::cout << "Load KDT From " << sTreeFileName << std::endl;
std::ifstream input(sTreeFileName, std::ios::binary);
if (!input.is_open()) return false;
input.read((char*)&m_iTreeNumber, sizeof(int));
m_pTreeStart.resize(m_iTreeNumber);
input.read((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber);
SizeType treeNodeSize;
input.read((char*)&treeNodeSize, sizeof(SizeType));
m_pTreeRoots.resize(treeNodeSize);
input.read((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize);
input.close();
std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
return true;
}
template <typename T>
void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
{
for (int i = 0; i < m_iTreeNumber; i++) {
KDTSearch(p_index, p_query, p_space, m_pTreeStart[i], 0);
}
}
template <typename T>
void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const
{
while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits)
{
auto& tcell = p_space.m_SPTQueue.pop();
KDTSearch(p_index, p_query, p_space, tcell.node, tcell.distance);
}
}
private:
template <typename T>
void KDTSearch(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query,
COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const {
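// Negative node ids encode leaves: node == -(sample index) - 1, as stored by DivideTree.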
if (node < 0)
{
SizeType index = -node - 1;
if (index >= p_index->GetNumSamples()) return;
const char* data = (const char *)(p_index->GetSample(index));
#ifdef PREFETCH
__builtin_prefetch(data);
__builtin_prefetch(data + 64);
#endif
if (p_space.CheckAndSet(index)) return;
++p_space.m_iNumberOfTreeCheckedLeaves;
++p_space.m_iNumberOfCheckedLeaves;
p_space.m_NGQueue.insert(COMMON::HeapCell(index, p_index->ComputeDistance((const void*)p_query.GetTarget(), (const void*)data)));
return;
}
auto& tnode = m_pTreeRoots[node];
float diff = (p_query.GetTarget())[tnode.split_dim] - tnode.split_value;
float distanceBound = distBound + diff * diff;
SizeType otherChild, bestChild;
if (diff < 0)
{
bestChild = tnode.left;
otherChild = tnode.right;
}
else
{
otherChild = tnode.left;
bestChild = tnode.right;
}
p_space.m_SPTQueue.insert(COMMON::HeapCell(otherChild, distanceBound));
KDTSearch(p_index, p_query, p_space, bestChild, distBound);
}
template <typename T>
void DivideTree(VectorIndex* p_index, std::vector<SizeType>& indices, SizeType first, SizeType last,
SizeType index, SizeType &iTreeSize) {
ChooseDivision<T>(p_index, m_pTreeRoots[index], indices, first, last);
SizeType i = Subdivide<T>(p_index, m_pTreeRoots[index], indices, first, last);
if (i - 1 <= first)
{
m_pTreeRoots[index].left = -indices[first] - 1;
}
else
{
iTreeSize++;
m_pTreeRoots[index].left = iTreeSize;
DivideTree<T>(p_index, indices, first, i - 1, iTreeSize, iTreeSize);
}
if (last == i)
{
m_pTreeRoots[index].right = -indices[last] - 1;
}
else
{
iTreeSize++;
m_pTreeRoots[index].right = iTreeSize;
DivideTree<T>(p_index, indices, i, last, iTreeSize, iTreeSize);
}
}
template <typename T>
void ChooseDivision(VectorIndex* p_index, KDTNode& node, const std::vector<SizeType>& indices, const SizeType first, const SizeType last)
{
std::vector<float> meanValues(p_index->GetFeatureDim(), 0);
std::vector<float> varianceValues(p_index->GetFeatureDim(), 0);
SizeType end = min(first + m_iSamples, last);
SizeType count = end - first + 1;
// calculate the mean of each dimension
for (SizeType j = first; j <= end; j++)
{
const T* v = (const T*)p_index->GetSample(indices[j]);
for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++)
{
meanValues[k] += v[k];
}
}
for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++)
{
meanValues[k] /= count;
}
// calculate the variance of each dimension
for (SizeType j = first; j <= end; j++)
{
const T* v = (const T*)p_index->GetSample(indices[j]);
for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++)
{
float dist = v[k] - meanValues[k];
varianceValues[k] += dist*dist;
}
}
// choose the split dimension from among the TOP_DIM dimensions with the largest variance
node.split_dim = SelectDivisionDimension(varianceValues);
// determine the threshold
node.split_value = meanValues[node.split_dim];
}
DimensionType SelectDivisionDimension(const std::vector<float>& varianceValues) const
{
// Record the top maximum variances
std::vector<DimensionType> topind(m_numTopDimensionKDTSplit);
int num = 0;
// order the variances
for (DimensionType i = 0; i < (DimensionType)varianceValues.size(); i++)
{
if (num < m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]])
{
if (num < m_numTopDimensionKDTSplit)
{
topind[num++] = i;
}
else
{
topind[num - 1] = i;
}
int j = num - 1;
// order the TOP_DIM variances
while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]])
{
std::swap(topind[j], topind[j - 1]);
j--;
}
}
}
// randomly choose a dimension from TOP_DIM
return topind[COMMON::Utils::rand(num)];
}
template <typename T>
SizeType Subdivide(VectorIndex* p_index, const KDTNode& node, std::vector<SizeType>& indices, const SizeType first, const SizeType last) const
{
SizeType i = first;
SizeType j = last;
// decide which child each point belongs to
while (i <= j)
{
SizeType ind = indices[i];
const T* v = (const T*)p_index->GetSample(ind);
float val = v[node.split_dim];
if (val < node.split_value)
{
i++;
}
else
{
std::swap(indices[i], indices[j]);
j--;
}
}
// if all the points in the node are equal, split the node equally into 2
if ((i == first) || (i == last + 1))
{
i = (first + last + 1) / 2;
}
return i;
}
private:
std::vector<SizeType> m_pTreeStart;
std::vector<KDTNode> m_pTreeRoots;
public:
std::unique_ptr<std::shared_timed_mutex> m_lock;
int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples;
};
}
}
#endif
|
spectral-norm.c
|
/*
* The Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
*
* Original C contributed by Sebastien Loisel
* Conversion to C++ by Jon Harrop
* OpenMP parallelize by The Anh Tran
* Add SSE by The Anh Tran
* Reconversion into C by Dan Farina
*/
#define _GNU_SOURCE
#include <omp.h>
#include <math.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#define false 0
#define true 1
/* define SIMD data type. 2 doubles encapsulated in one XMM register */
typedef double v2dt __attribute__((vector_size(16)));
static const v2dt v1 = {1.0, 1.0};
/* parameter for evaluate functions */
struct Param
{
double* u; /* source vector */
double* tmp; /* temporary */
double* v; /* destination vector */
int N; /* source/destination vector length */
int N2; /* = N/2 */
int r_begin; /* working range of each thread */
int r_end;
};
/* Return: 1.0 / ((i + j) * (i + j + 1) / 2 + i + 1) */
static double
eval_A(int i, int j)
{
/*
* 1.0 / ((i + j) * (i + j + 1) / 2 + i + 1)
* n * (n+1) is always even, so (>> 1) implements the (/2)
*/
int d = (((i+j) * (i+j+1)) >> 1) + i+1;
return 1.0 / d;
}
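/* Example: eval_A(1, 2): d = ((3*4) >> 1) + 1 + 1 = 8, so A(1,2) = 1/8 */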
/*
* Return type: 2 doubles in an xmm register [double1, double2]
* double1 = 1.0 / ((i + j) * (i + j + 1) / 2 + i + 1)
* double2 = 1.0 / ((i+1 + j) * (i+1 + j + 1) / 2 + (i+1) + 1)
*/
static v2dt
eval_A_i(int i, int j)
{
int d1 = (((i+j) * (i+j+1)) >> 1) + i+1;
int d2 = (((i+1 +j) * (i+1 +j+1)) >> 1) + (i+1) +1;
v2dt r = {d1, d2};
return v1 / r;
}
/*
* Return type: 2 doubles in an xmm register [double1, double2]
* double1 = 1.0 / ((i + j) * (i + j + 1) / 2 + i + 1)
* double2 = 1.0 / ((i + j+1) * (i + j+1 + 1) / 2 + i + 1)
*/
static v2dt
eval_A_j(int i, int j)
{
int d1 = (((i+j) * (i+j+1)) >> 1) + i+1;
int d2 = (((i+ j+1) * (i+ j+1 +1)) >> 1) + i+1;
v2dt r = {d1, d2};
return v1 / r;
}
/* This function is called by many threads */
static void
eval_A_times_u(struct Param *p)
{
/* alias of source vector */
const v2dt *pU = (void *) p->u;
int i;
int ie;
for (i = p->r_begin, ie = p->r_end; i < ie; i++)
{
v2dt sum = {0, 0};
/* xmm = 2 doubles. This loop runs over [0 .. N/2) */
int j;
for (j = 0; j < p->N2; j++)
sum += pU[j] * eval_A_j(i, j*2);
/* write result */
{
double *mem = (void *) &sum;
p->tmp[i] = mem[0] + mem[1];
}
/* If the source vector has odd size, this runs at most once */
for (j = j*2; __builtin_expect(j < p->N, false); j++)
p->tmp[i] += eval_A(i, j) * p->u[j];
}
}
static void
eval_At_times_u(struct Param *p)
{
const v2dt *pT = (void *) p->tmp;
int i;
int ie;
for (i = p->r_begin, ie = p->r_end; i < ie; i++)
{
v2dt sum = {0, 0};
int j;
for (j = 0; j < p->N2; j++)
sum += pT[j] * eval_A_i(j*2, i);
{
double *mem = (void *) &sum;
p->v[i] = mem[0] + mem[1];
}
/* odd size array */
for (j = j*2; __builtin_expect(j < p->N, false); j++)
p->v[i] += eval_A(j, i) * p->tmp[j];
}
}
/*
* Called by N threads.
*
* Each thread modifies its portion in destination vector -> barrier needed to
* sync access
*/
static void
eval_AtA_times_u(struct Param *p)
{
eval_A_times_u(p);
#pragma omp barrier
eval_At_times_u(p);
#pragma omp barrier
}
/*
* The Shootout benchmark uses CPU affinity to emulate a single-core processor.
* This function determines an appropriate number of threads to spawn.
*/
static int
GetThreadCount()
{
cpu_set_t cs;
int i;
int count = 0;
CPU_ZERO(&cs);
sched_getaffinity(0, sizeof(cs), &cs);
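/* Note: only the first 16 CPUs are inspected; scanning up to CPU_SETSIZE would cover the whole mask */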
for (i = 0; i < 16; i++)
if (CPU_ISSET(i, &cs))
count++;
return count;
}
static double
spectral_game(int N)
{
/* Align to 64 bytes to match the L2 cache line */
__attribute__((aligned(64))) double u[N];
__attribute__((aligned(64))) double tmp[N];
__attribute__((aligned(64))) double v[N];
double vBv = 0.0;
double vv = 0.0;
#pragma omp parallel default(shared) num_threads(GetThreadCount())
{
int i;
#pragma omp for schedule(static)
for (i = 0; i < N; i++)
u[i] = 1.0;
/*
* This block is executed by each of the NUM_THREADS threads spawned above;
* variables declared inside it are private to each thread.
*/
int threadid = omp_get_thread_num();
int threadcount = omp_get_num_threads();
int chunk = N / threadcount;
int ite;
struct Param my_param;
my_param.tmp = tmp;
my_param.N = N;
my_param.N2 = N/2;
/*
* calculate each thread's working range [range1 .. range2) => static
* schedule here
*/
my_param.r_begin = threadid * chunk;
my_param.r_end = (threadid < (threadcount -1)) ?
(my_param.r_begin + chunk) : N;
for (ite = 0; ite < 10; ite++)
{
my_param.u = u; /* source vec is u */
my_param.v = v; /* destination vec is v */
eval_AtA_times_u(&my_param);
my_param.u = v; /* source is v */
my_param.v = u; /* destination is u */
eval_AtA_times_u(&my_param);
}
/* multi thread adding */
{
int i;
#pragma omp for schedule(static) reduction( + : vBv, vv ) nowait
for (i = 0; i < N; i++)
{
vv += v[i] * v[i];
vBv += u[i] * v[i];
}
}
}
/* end parallel region */
return sqrt(vBv/vv);
}
int
main(int argc, char *argv[])
{
int N = ((argc >= 2) ? atoi(argv[1]) : 2000);
printf("%.9f\n", spectral_game(N));
return 0;
}
|
ejercicio_04.c
|
/* Exercise 4
 * Using the OpenMP API, write a program that does the following:
 * - Create 2 matrices of 50 columns x 50 rows (50x50), initialized with random values. [✔]
 * - Generate the transposed matrices (MT) of each one. [✔]
 * - Add both transposed matrices into a 3rd matrix "R". [✔]
 * - Compute the sum of each row of matrix R and store it in a vector. [✔]
 * - Return the maximum value of that vector and its row. [✔]
 */
// Libraries
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Definitions
#define CHUNKSIZE 10
#define N 10
#define NRA 5 // number of rows in matrix A
#define NCA 5 // number of columns in matrix A
#define NRB 5 // number of rows in matrix B
#define NCB 5 // number of columns in matrix B
#define NRR 5 // number of rows in matrix R
#define NCR 5 // number of columns in matrix R
// Helper to obtain random numbers in the range [li, ls]
long Random(long li, long ls)
{ long n;
n=li+rand()%(ls-li+1);
return n;
}
// Main program
int main (int argc, char *argv[]) {
int r, o, mayor =0;
int u = 1;
int i, j, n;
float a[100], b[100];
double mA[NRA][NCA];
double mAT[NRA][NCA];
double mB[NRB][NCB];
double mBT[NRB][NCB];
double mR[NRR][NCR];
double h[10];
double sum = 0.0; // initialize: sum is accumulated below without being set first
// Set the number of threads for the upcoming parallel regions
omp_set_num_threads(2);
// PARALLEL FOR directive with a SCHEDULE clause
#pragma omp parallel for schedule(static,10)
for (i=0; i<NRA; i++)
{
for (j=0; j<NCA; j++)
{
// Fill the first 2 matrices with random numbers
mA[i][j]= Random(1,20);
mB[i][j]= Random(1,20);
// Transposes of the first 2 matrices
mAT[j][i] = mA[i][j];
mBT[j][i] = mB[i][j];
// Sum of the 2 transposed matrices to form matrix R
mR[j][i]=mAT[j][i]+mBT[j][i];
h[j]=mR[i][j]; // note: mR[i][j] is written by a different loop iteration, so this may read stale data
}
}
// Print matrix A
printf("******************************************************\n");
printf("Matriz A:\n");
for (i=0; i<NRA; i++)
{
for (j=0; j<NCA; j++)
{
printf("%6.2f ", mA[i][j]);
}
printf("\n");
}
// Print the transpose of matrix A
printf("******************************************************\n");
printf("Matriz Transpuesta de A:\n");
for (j=0; j<NRA; j++)
{
for (i=0; i<NCA; i++)
{
printf("%6.2f ", mAT[j][i]);
}
printf("\n");
}
// Print matrix B
printf("******************************************************\n");
printf("Matriz B:\n");
for (i=0; i<NRB; i++)
{
for (j=0; j<NCB; j++)
{
printf("%6.2f ", mB[i][j]);
}
printf("\n");
}
// Print the transpose of matrix B
printf("******************************************************\n");
printf("Matriz Transpuesta de B:\n");
for (j=0; j<NRB; j++)
{
for (i=0; i<NCB; i++)
{
printf("%6.2f ", mBT[j][i]);
}
printf("\n");
}
// Print matrix R
printf("\n");
printf("******************************************************\n");
printf("Matriz R: 'Sumatoria de Matrices Transpuestas'\n");
for (j=0; j<NRR; j++)
{
for (i=0; i<NCR; i++)
{
printf("%6.2f ", mR[j][i]);
sum = sum + mAT[j][i]; // running total (note: this accumulates mAT, not mR)
}
printf("\n");
}
/*
printf("\n");
// Sum of row 1
printf(" Fila: %d ",u);
for(j=0;j<NCB;j++){
//int mayor=h[0];
//j++;
if (h[j]>mayor){
mayor = h[j];
}
else{
if (h[j]>mayor){
mayor=mayor;
}
}
printf("\n El valor del elemento %d del vector es: [%6.2f]",j+1,h[j]);
sum = sum + h[j];
}
printf("\n");
printf("\n La suma es: %6.2f ",sum);
*/
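/*
 * Sketch (not compiled in): how the remaining requirements -- the row-sum vector and its
 * maximum value/row -- could be computed once mR has been filled. rowSum and maxRow are
 * names introduced here for illustration only.
 *
 * double rowSum[NRR]; int maxRow = 0;
 * #pragma omp parallel for private(j)
 * for (i = 0; i < NRR; i++) {
 *   rowSum[i] = 0.0;
 *   for (j = 0; j < NCR; j++) rowSum[i] += mR[i][j];
 * }
 * for (i = 1; i < NRR; i++) if (rowSum[i] > rowSum[maxRow]) maxRow = i;
 * printf("Maximum row sum %6.2f at row %d\n", rowSum[maxRow], maxRow + 1);
 */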
}
|
GB_unop__identity_int32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_fp32)
// op(A') function: GB (_unop_tran__identity_int32_fp32)
// C type: int32_t
// A type: float
// cast: int32_t cij = GB_cast_to_int32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int32_fp32)
(
int32_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_uint8.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint8)
// A*D function (colscale): GB (_AxD__rdiv_uint8)
// D*A function (rowscale): GB (_DxB__rdiv_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint8)
// C=scalar+B GB (_bind1st__rdiv_uint8)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint8)
// C=A+scalar GB (_bind2nd__rdiv_uint8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_int16_int32.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_int32
// op(A') function: GB_tran__identity_int16_int32
// C type: int16_t
// A type: int32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int16_int32
(
int16_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
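//------------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code): the kernel above is an
// elementwise typecasting copy.  With plain C arrays it reduces to the loop
// below; the int32 -> int16 cast is the same C conversion performed by the
// generated GB_CASTING macro.
//------------------------------------------------------------------------------
static inline void GB_identity_cast_sketch
(
    int16_t *Cx,
    const int32_t *Ax,
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (int16_t) Ax [p] ;
    }
}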
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int16_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GeneralMatrixMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
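/* Illustrative sketch (assumed helper, not Eigen code): the specialization
 * above handles a row-major destination by using C^T = (A*B)^T = B^T * A^T,
 * since storing C^T column-major touches exactly the same memory as storing
 * C row-major.  A naive column-major kernel makes the swap explicit: */
inline void gemm_colmajor_naive_sketch(int rows, int cols, int depth,
                                       const double* lhs, int lhsStride,  // column-major, rows x depth
                                       const double* rhs, int rhsStride,  // column-major, depth x cols
                                       double* res, int resStride)        // column-major, rows x cols
{
  for (int j = 0; j < cols; ++j)
    for (int i = 0; i < rows; ++i) {
      double acc = 0.0;
      for (int k = 0; k < depth; ++k)
        acc += lhs[i + k*lhsStride] * rhs[k + j*rhsStride];
      res[i + j*resStride] += acc;
    }
}
/* A row-major product C(rows x cols) += A(rows x depth) * B(depth x cols),
 * all three stored row-major with leading dimensions ldA/ldB/ldC, can then be
 * computed as gemm_colmajor_naive_sketch(cols, rows, depth, B, ldB, A, ldA, C, ldC),
 * which is exactly the operand/storage-order swap performed by run() above. */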
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
#endif
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panels of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
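/* Conceptual sketch of the blocking scheme above (assumed simplification, not
 * the actual Eigen kernel): the i2/k2/j2 loops mirror the sequential path,
 * cutting the operands into mc x kc and kc x nc cache blocks.  The real code
 * packs each lhs panel and rhs block into the contiguous buffers blockA and
 * blockB before calling the gebp micro-kernel; here the "kernel" is a naive
 * triple loop on the current block and alpha is taken to be 1. */
inline void gemm_blocked_sketch(int rows, int cols, int depth,
                                const double* lhs, int ldl,   // column-major, rows x depth
                                const double* rhs, int ldr,   // column-major, depth x cols
                                double* res, int ldres,       // column-major, rows x cols
                                int mc, int kc, int nc)
{
  for (int i2 = 0; i2 < rows; i2 += mc) {
    const int actual_mc = (rows - i2 < mc) ? rows - i2 : mc;
    for (int k2 = 0; k2 < depth; k2 += kc) {
      const int actual_kc = (depth - k2 < kc) ? depth - k2 : kc;
      // real code: pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
      for (int j2 = 0; j2 < cols; j2 += nc) {
        const int actual_nc = (cols - j2 < nc) ? cols - j2 : nc;
        // real code: pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
        //            gebp(res.getSubMapper(i2,j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        for (int j = 0; j < actual_nc; ++j)
          for (int i = 0; i < actual_mc; ++i) {
            double acc = 0.0;
            for (int k = 0; k < actual_kc; ++k)
              acc += lhs[(i2 + i) + (k2 + k) * ldl] * rhs[(k2 + k) + (j2 + j) * ldr];
            res[(i2 + i) + (j2 + j) * ldres] += acc;
          }
      }
    }
  }
}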
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
// Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
// Fallback to GEMV if either the lhs or rhs is a runtime vector
if (dst.cols() == 1)
{
typename Dest::ColXpr dst_vec(dst.col(0));
return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
}
else if (dst.rows() == 1)
{
typename Dest::RowXpr dst_vec(dst.row(0));
return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
}
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
GB_subassign_00.c
|
//------------------------------------------------------------------------------
// GB_subassign_00: C(I,J)<!,repl> = empty ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 00: C(I,J)<!,repl> = empty ; using S
// M: NULL
// Mask_comp: true
// C_replace: true
// accum: any (present or not; result is the same)
// A: any (scalar or matrix; result is the same)
// S: constructed
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_00
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix S,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
int64_t *GB_RESTRICT Ci = C->i ;
const int64_t *GB_RESTRICT Sx = (int64_t *) S->x ;
//--------------------------------------------------------------------------
// Method 00: C(I,J)<!,repl> = empty ; using S
//--------------------------------------------------------------------------
// Time: Optimal, O(nnz(S)), assuming S has already been constructed.
//--------------------------------------------------------------------------
// Parallel: all entries in S can be processed fully in parallel.
//--------------------------------------------------------------------------
// All entries in C(I,J) are deleted. The result does not depend on A or
// the scalar.
int64_t snz = GB_NNZ (S) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (snz, chunk, nthreads_max) ;
int64_t nzombies = C->nzombies ;
int64_t pS ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:nzombies)
for (pS = 0 ; pS < snz ; pS++)
{
// S (inew,jnew) is a pointer back into C (I(inew), J(jnew))
int64_t pC = Sx [pS] ;
int64_t i = Ci [pC] ;
// ----[X A 0] or [X . 0]-----------------------------------------------
// action: ( X ): still a zombie
// ----[C A 0] or [C . 0]-----------------------------------------------
// action: C_repl: ( delete ): becomes a zombie
if (!GB_IS_ZOMBIE (i))
{
nzombies++ ;
Ci [pC] = GB_FLIP (i) ;
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
C->nzombies = nzombies ;
return (GrB_SUCCESS) ;
}
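//--------------------------------------------------------------------------
// Illustrative sketch (not part of the library): method 00 deletes entries
// by turning them into zombies, i.e. by flipping the stored row index so a
// later wait/assemble phase can prune them in bulk.  The standalone toy
// below mirrors the idea with its own flip encoding (assumed here):
// flip (i) = -i-2 is its own inverse and never collides with a valid index.
//--------------------------------------------------------------------------
static inline int64_t zombie_flip_sketch (int64_t i)
{
    return (-i - 2) ;
}
static inline void delete_listed_entries_sketch
(
    int64_t *Ci,            // row indices of C ; zombies are negative
    const int64_t *Sx,      // Sx [pS] = position in Ci of an entry to delete
    int64_t snz,            // number of positions listed in Sx
    int64_t *nzombies       // running zombie count, updated
)
{
    for (int64_t pS = 0 ; pS < snz ; pS++)
    {
        int64_t pC = Sx [pS] ;
        int64_t i = Ci [pC] ;
        if (i >= 0)             // not already a zombie
        {
            Ci [pC] = zombie_flip_sketch (i) ;
            (*nzombies)++ ;
        }
    }
}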
|
binary_operation.h
|
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef __NUMPY_BINARY_OPERATION_H__
#define __NUMPY_BINARY_OPERATION_H__
#include "point_task.h"
namespace legate {
namespace numpy {
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
template <int DIM, typename BinaryFunction, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
gpu_binary_op(const Args args, const bool dense)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= args.volume) return;
BinaryFunction func;
if (dense) {
args.outptr[idx] = func(args.in1ptr[idx], args.in2ptr[idx]);
} else {
const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
args.out[point] = func(args.in1[point], args.in2[point]);
}
}
#endif
// Base class for all Legate's binary operation tasks
template <class Derived, class BinaryFunction>
class BinaryOperationTask : public PointTask<Derived> {
private:
using first_argument_type = typename BinaryFunction::first_argument_type;
using second_argument_type = typename BinaryFunction::second_argument_type;
using result_type = std::result_of_t<BinaryFunction(first_argument_type, second_argument_type)>;
public:
static_assert(std::is_same<first_argument_type, second_argument_type>::value,
"BinaryOperationTask currently requires first_argument_type and "
"second_argument_type to be the same type.");
// XXX figure out how to hoist this into PointTask
static const int TASK_ID = task_id<BinaryFunction::op_code,
NUMPY_NORMAL_VARIANT_OFFSET,
result_type,
first_argument_type,
second_argument_type>;
// out_region = in_region1 op in_region2
static const int REGIONS = 3;
template <int N>
struct DeserializedArgs {
Legion::Rect<N> rect;
AccessorWO<result_type, N> out;
AccessorRO<first_argument_type, N> in1;
AccessorRO<second_argument_type, N> in2;
Pitches<N - 1> pitches;
size_t volume;
result_type* outptr;
const first_argument_type* in1ptr;
const second_argument_type* in2ptr;
bool deserialize(LegateDeserializer& derez,
const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions)
{
rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
out = derez.unpack_accessor_WO<result_type, N>(regions[0], rect);
in1 = derez.unpack_accessor_RO<first_argument_type, N>(regions[1], rect);
in2 = derez.unpack_accessor_RO<second_argument_type, N>(regions[2], rect);
volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
// Check to see if this is dense or not
return out.accessor.is_dense_row_major(rect) && in1.accessor.is_dense_row_major(rect) &&
in2.accessor.is_dense_row_major(rect) && (outptr = out.ptr(rect)) &&
(in1ptr = in1.ptr(rect)) && (in2ptr = in2.ptr(rect));
#else
// No dense execution if we're doing bounds checks
return false;
#endif
}
};
template <int DIM>
static void dispatch_cpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
BinaryFunction func;
if (dense) {
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.in1ptr[idx], args.in2ptr[idx]);
} else {
CPULoop<DIM>::binary_loop(func, args.out, args.in1, args.in2, args.rect);
}
}
#ifdef LEGATE_USE_OPENMP
template <int DIM>
static void dispatch_omp(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
BinaryFunction func;
if (dense) {
#pragma omp parallel for schedule(static)
for (size_t idx = 0; idx < args.volume; ++idx) {
args.outptr[idx] = func(args.in1ptr[idx], args.in2ptr[idx]);
}
} else {
OMPLoop<DIM>::binary_loop(func, args.out, args.in1, args.in2, args.rect);
}
}
#endif
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
gpu_binary_op<DIM, BinaryFunction, DeserializedArgs<DIM>>
<<<blocks, THREADS_PER_BLOCK>>>(args, dense);
}
#elif defined(LEGATE_USE_CUDA)
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez);
#endif
};
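// Illustrative sketch (assumed behaviour, not the actual Pitches class): in
// the non-dense path above, the flat loop index is converted back into an
// N-dimensional point before the accessors are applied.  For a row-major
// rectangle with extents ext[0..ndim-1] this is repeated div/mod, with the
// last dimension varying fastest.
inline void unflatten_index_sketch(size_t idx, const size_t* ext, int ndim, size_t* point)
{
  for (int d = ndim - 1; d >= 0; --d) {
    point[d] = idx % ext[d];  // coordinate in dimension d
    idx /= ext[d];            // remaining index for the outer dimensions
  }
}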
} // namespace numpy
} // namespace legate
#endif // __NUMPY_BINARY_OPERATION_H__
|
quicksort-omp.h
|
#include "omp.h"
void quickSort_parallel(int* array, int lenArray, int numThreads);
void quickSort_parallel_internal(int* array, int left, int right, int cutoff);
void quickSort_parallel(int* array, int lenArray, int numThreads){
int cutoff = 1000;
#pragma omp parallel num_threads(numThreads)
{
#pragma omp single nowait
{
quickSort_parallel_internal(array, 0, lenArray-1, cutoff);
}
}
}
void quickSort_parallel_internal(int* array, int left, int right, int cutoff)
{
int i = left, j = right;
int tmp;
int pivot = array[(left + right) / 2];
{
/* PARTITION PART */
while (i <= j) {
while (array[i] < pivot)
i++;
while (array[j] > pivot)
j--;
if (i <= j) {
tmp = array[i];
array[i] = array[j];
array[j] = tmp;
i++;
j--;
}
}
}
if ( ((right-left)<cutoff) ){
if (left < j){ quickSort_parallel_internal(array, left, j, cutoff); }
if (i < right){ quickSort_parallel_internal(array, i, right, cutoff); }
}else{
#pragma omp task
{ quickSort_parallel_internal(array, left, j, cutoff); }
#pragma omp task
{ quickSort_parallel_internal(array, i, right, cutoff); }
}
}
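/* Minimal usage sketch (assumed driver, not part of the original header):
 * compile with -fopenmp and define QUICKSORT_OMP_DEMO to build it.  The
 * wrapper above opens one parallel region, a single thread seeds the
 * recursion, and each recursive call either sorts serially (below the
 * cutoff) or spawns two tasks for the two halves. */
#ifdef QUICKSORT_OMP_DEMO
#include <stdio.h>
#include <stdlib.h>
int main(void){
    int n = 100000;
    int* data = (int*) malloc(sizeof(int) * n);
    for (int i = 0; i < n; i++) data[i] = rand();
    quickSort_parallel(data, n, 4); /* sort with 4 threads */
    for (int i = 1; i < n; i++) {
        if (data[i-1] > data[i]) { printf("not sorted\n"); free(data); return 1; }
    }
    printf("sorted\n");
    free(data);
    return 0;
}
#endif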
|
section_firstprivate.c
|
// Skip testing on 64 bit systems for now!
#ifndef __LP64__
#include <stdio.h>
#include "omp_testsuite.h"
int
check_section_firstprivate (FILE * logFile)
{
int sum = 7;
int sum0 = 11;
int known_sum;
#pragma omp parallel
{
#pragma omp sections firstprivate(sum0)
{
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
} /*end of sections */
} /* end of parallel */
known_sum = 11 * 3 + 7;
return (known_sum == sum);
} /* end of check_section_firstprivate */
int
crosscheck_section_firstprivate (FILE * logFile)
{
int sum = 7;
int sum0 = 11;
int known_sum;
#pragma omp parallel
{
#pragma omp sections private(sum0)
{
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
} /*end of sections */
} /* end of parallel */
known_sum = 11 * 3 + 7;
return (known_sum == sum);
} /* end of crosscheck_section_firstprivate */
#else
#warning "Not tested on 64 bit systems"
#endif
|
4848.c
|
/* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <[email protected]>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(2)
{
#pragma omp for schedule(static, 8)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp for schedule(static, 8)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
data[i][j] -= mean[j];
/* Calculate the m * m covariance matrix. */
#pragma omp for schedule(static, 8)
for (j1 = 0; j1 < _PB_M; j1++)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
#pragma endscop
}
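/* Reference sketch (assumed helper, not part of the benchmark): after each
   column is centered by its mean (computed with the same float_n divisor used
   above), the kernel accumulates symmat[j1][j2] = sum_i data[i][j1]*data[i][j2].
   A plain serial version over ordinary row-major arrays, usable to spot-check
   small problem sizes, looks like this (DATA_TYPE assumed to be double). */
static void covariance_reference_sketch(int m, int n, double float_n,
                                        const double* data, /* n x m, row-major */
                                        double* symmat,     /* m x m, row-major */
                                        double* mean)       /* length m */
{
  int i, j, j1, j2;
  for (j = 0; j < m; j++) {
    mean[j] = 0.0;
    for (i = 0; i < n; i++) mean[j] += data[i*m + j];
    mean[j] /= float_n;
  }
  for (j1 = 0; j1 < m; j1++)
    for (j2 = j1; j2 < m; j2++) {
      double s = 0.0;
      for (i = 0; i < n; i++)
        s += (data[i*m + j1] - mean[j1]) * (data[i*m + j2] - mean[j2]);
      symmat[j1*m + j2] = s;
      symmat[j2*m + j1] = s;
    }
}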
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
graph_cut.h
|
#ifndef GRAPHCUT_H
#define GRAPHCUT_H
#include "submodular.h"
#include <vector>
#include "../la/vector.h"
template<class DT>
class Edge {
public:
int64_t index;
DT weight;
Edge(int64_t i, DT w) : index(i), weight(w) {}
};
// Submodular function for a flow network:
// 1 source and 1 sink, 2 groups
template<class DT>
class MinCut final : public SubmodularFunction<DT> {
public:
//Each node has a list of edges in and a list of edges out.
//Each edge has an index and a weight, and we will have the source and sink node have index n and n+1, respectively.
std::vector<std::vector<Edge<DT>>> adj_in;
std::vector<std::vector<Edge<DT>>> adj_out;
int64_t n;
DT baseline;
unsigned seed;
MinCut(int64_t n_in) : SubmodularFunction<DT>(n_in), n(n_in), baseline(0.0) {
std::random_device rd;
seed = rd();
WattsStrogatz(16, 0.25);
// Groups(2, 0.25, .01);
}
private:
void init_adj_lists()
{
adj_in.clear();
adj_out.clear();
for(int64_t i = 0; i < n+2; i++) {
adj_in.emplace_back(std::vector<Edge<DT>>());
adj_out.emplace_back(std::vector<Edge<DT>>());
}
}
void connect_directed(int64_t i, int64_t j, double weight)
{
assert(i != j);
assert(!std::any_of(adj_out[i].begin(), adj_out[i].end(), [=](Edge<DT> e){return e.index == j;})) ;
assert(!std::any_of(adj_in[j].begin(), adj_in[j].end(), [=](Edge<DT> e){return e.index == i;})) ;
//Edge from i to j
adj_out[i].emplace_back(Edge<DT>(j, weight));
adj_in [j].emplace_back(Edge<DT>(i, weight));
}
void connect_undirected(int64_t i, int64_t j, double weight)
{
assert(i != j);
assert(!std::any_of(adj_out[j].begin(), adj_out[j].end(), [=](Edge<DT> e){return e.index == i;})) ;
assert(!std::any_of(adj_out[i].begin(), adj_out[i].end(), [=](Edge<DT> e){return e.index == j;})) ;
assert(!std::any_of(adj_in[j].begin(), adj_in[j].end(), [=](Edge<DT> e){return e.index == i;})) ;
assert(!std::any_of(adj_in[i].begin(), adj_in[i].end(), [=](Edge<DT> e){return e.index == j;})) ;
//Edge from i to j
adj_out[i].emplace_back(Edge<DT>(j, weight));
adj_in [j].emplace_back(Edge<DT>(i, weight));
//Edge from j to i
adj_out[j].emplace_back(Edge<DT>(i, weight));
adj_in [i].emplace_back(Edge<DT>(j, weight));
}
//Utility routine to randomly select source and sink nodes
void select_source_and_sink(std::mt19937 gen)
{
//Select source and sink nodes randomly, but not the nodes at n or n+1,
//so I don't have to handle the special cases
std::uniform_int_distribution<int64_t> uniform_node(0, n-1);
int64_t source = uniform_node(gen);
int64_t sink = uniform_node(gen);
while(source == sink) {
sink = uniform_node(gen);
}
assert(source >= 0 && source < n && sink >= 0 && sink < n);
//Swap locations (in memory) of source, sink and last 2 nodes
std::swap(adj_out[source], adj_out[n]);
std::swap(adj_in [source], adj_in [n]);
std::swap(adj_out[sink], adj_out[n+1]);
std::swap(adj_in [sink], adj_in [n+1]);
//Clear out incoming edges of source and outgoing edges of sink
adj_in[n].clear();
adj_out[n+1].clear();
double weight_factor = 4.0;
//scale outgoing weights of source and incoming weights of sink
for(uint64_t j = 0; j < adj_out[n].size(); j++) {
adj_out[n][j].weight *= weight_factor;
}
for(uint64_t j = 0; j < adj_in[n+1].size(); j++) {
adj_in[n+1][j].weight *= weight_factor;
}
//Fix up the rest of the adjacency lists
for(int64_t i = 0; i < n+2; i++){
//Remove outgoing edges to source node and incoming edges from sink node
adj_out[i].erase(std::remove_if(adj_out[i].begin(), adj_out[i].end(), [=](Edge<DT> e){ return e.index == source; }), adj_out[i].end());
adj_in [i].erase(std::remove_if(adj_in [i].begin(), adj_in [i].end(), [=](Edge<DT> e){ return e.index == sink; }), adj_in [i].end());
//Redirect edges to their new sources and destinations
for(uint64_t e = 0; e < adj_out[i].size(); e++) {
if(adj_out[i][e].index == sink) {
adj_out[i][e].index = n+1;
adj_out[i][e].weight *= weight_factor;
} else if(adj_out[i][e].index == n) {
adj_out[i][e].index = source;
} else if(adj_out[i][e].index == n+1) {
adj_out[i][e].index = sink;
}
}
for(uint64_t e = 0; e < adj_in[i].size(); e++) {
if(adj_in[i][e].index == source) {
adj_in[i][e].index = n;
adj_in[i][e].weight *= weight_factor;
} else if(adj_in[i][e].index == n) {
adj_in[i][e].index = source;
} else if(adj_in[i][e].index == n+1) {
adj_in[i][e].index = sink;
}
}
}
}
void sanity_check()
{
Vector<double> sum_in_a(n+2); sum_in_a.set_all(0.0);
Vector<double> sum_in_b(n+2); sum_in_b.set_all(0.0);
Vector<double> sum_out_a(n+2); sum_out_a.set_all(0.0);
Vector<double> sum_out_b(n+2); sum_out_b.set_all(0.0);
for(int64_t i = 0; i < n+2; i++)
{
for(auto a: adj_in[i]) {
sum_in_a(i) += a.weight;
sum_out_b(a.index) += a.weight;
}
for(auto a: adj_out[i]) {
sum_out_a(i) += a.weight;
sum_in_b(a.index) += a.weight;
}
}
sum_in_a.axpy(-1.0, sum_in_b);
sum_out_a.axpy(-1.0, sum_out_b);
if(sum_in_a.norm2() > 1e-5 || sum_out_a.norm2() > 1e-5) {
std::cout << "Graph is invalid. Exiting." << std::endl;
exit(1);
}
}
public:
void WattsStrogatz(int64_t k, double beta)
{
std::mt19937 gen(seed);
std::uniform_real_distribution<double> weight_dist(0.01, 1.0);
std::uniform_real_distribution<double> connect_dist(0.0, 1.0);
std::uniform_int_distribution<int64_t> uniform_node(0, n+1);
this->init_adj_lists();
//Connect each node to K nearest neighbors.
//With a beta % chance, rewire edge randomly
for(int64_t i = 0; i < n+2; i++) {
for(int64_t p = 1; p < k/2 && i+p < n+2; p++) {
int64_t new_neighbor = i+p;
if(connect_dist(gen) < beta) {
new_neighbor = uniform_node(gen); // reassign the outer variable (a fresh declaration here would shadow it and skip the rewire)
int64_t attempts = 0;
while(new_neighbor == i || std::any_of(adj_out[i].begin(), adj_out[i].end(), [=](Edge<DT> e){return e.index == new_neighbor;}))
{
new_neighbor = uniform_node(gen);
attempts++;
if(attempts > 1000) {
std::cerr << "Warning: Gave up on rewiring edge randomly" << std::endl;
new_neighbor = i+p;
break;
}
}
}
this->connect_undirected(i, new_neighbor, weight_dist(gen));
}
}
this->select_source_and_sink(gen);
//Establish baseline
baseline = 0.0;
for(auto a : adj_out[n]) {
baseline += a.weight;
}
this->sanity_check();
}
//Place vertices randomly on the unit square and connect if their distance is less than d
void Geometric(double d)
{
std::mt19937 gen(seed);
std::uniform_real_distribution<double> dist(0.0, 1.0);
std::uniform_real_distribution<double> weight_dist(0.1, 1.0);
this->init_adj_lists();
std::vector<double> x_coords(n+2);
std::vector<double> y_coords(n+2);
for(int64_t i = 0; i < n+2; i++) {
x_coords[i] = dist(gen);
y_coords[i] = dist(gen);
}
for(int64_t i = 0; i < n+2; i++) {
for(int64_t j = i+1; j < n+2; j++) {
double x_dist = x_coords[i] - x_coords[j];
double y_dist = y_coords[i] - y_coords[j];
double euclidean = sqrt(x_dist * x_dist + y_dist * y_dist);
if(euclidean < d)
this->connect_undirected(i, j, weight_dist(gen));
}
}
this->select_source_and_sink(gen);
//Establish baseline
baseline = 0.0;
for(auto a : adj_out[n]) {
baseline += a.weight;
}
this->sanity_check();
}
//Create a graph with k groups
//alpha = probability to connect within group
//beta = probability to connect between group
void Groups(int64_t k, DT alpha, DT beta)
{
std::random_device rd;
std::mt19937 gen{rd()};
std::uniform_real_distribution<double> connect_dist(0.0, 1.0);
std::uniform_real_distribution<double> weight_dist(0.1, 1.0);
this->init_adj_lists();
//Setup edges within graph
for(int64_t i = 0; i < n+2; i++) {
for(int64_t j = 0; j < n+2; j++) {
if(i == j) continue;
DT rand = connect_dist(gen);
if(i % k == j % k) {
if(rand < alpha) {
this->connect_directed(i, j, 1.0); //weight_dist(gen));
}
} else {
if(rand < beta) {
this->connect_directed(i, j, 1.0); //weight_dist(gen));
}
}
}
}
this->select_source_and_sink(gen);
//Establish baseline
baseline = 0.0;
for(auto a : adj_out[n]) {
baseline += a.weight;
}
this->sanity_check();
}
DT eval(const std::vector<bool>& A) override
{
DT val = 0.0;
for(int64_t i = 0; i < n; i++) {
if(A[i]){
for(auto b : adj_out[i]) {
if(b.index == n+1 || !A[b.index])
val += b.weight;
}
} else {
for(auto b : adj_out[i]) {
if(b.index != n+1 && A[b.index])
val += b.weight;
}
}
}
for(auto b : adj_out[n]) {
if(b.index == n+1 || !A[b.index])
val += b.weight;
}
return val - baseline;
}
virtual void gains(const std::vector<int64_t>& perm, Vector<DT>& x) override
{
std::vector<int64_t> perm_lookup(n);
_Pragma("omp parallel for")
for(int64_t i = 0; i < n; i++) {
perm_lookup[perm[i]] = i;
}
x.set_all(0.0);
//Iterate over every outgoing edge.
_Pragma("omp parallel for")
for(int64_t a = 0; a < n; a++) {
int64_t index_a = perm_lookup[a];
for(auto edge : adj_out[a]) {
int64_t b = edge.index;
int64_t index_b = perm_lookup[b];
if(b == n+1) {
//This edge goes to sink node, so there's a gain when the source vertex joins
#pragma omp atomic
x(a) += edge.weight;
}
else {
assert(a != b && b < n);
//We gain when the first vertex joins A and lose when the second joins
if(index_a < index_b) {
#pragma omp atomic
x(a) += edge.weight;
#pragma omp atomic
x(b) -= edge.weight;
} else {
#pragma omp atomic
x(a) -= edge.weight;
#pragma omp atomic
x(b) += edge.weight;
}
}
}
}
/*
_Pragma("omp parallel for")
for(int64_t a = 0; a < n; a++) {
int64_t index_a = perm_lookup[a];
for(auto edge : adj_in[a]) {
int64_t b = edge.index;
int64_t index_b = perm_lookup[b];
if(b == n) {
//This edge goes to sink node, so there's a gain when the source vertex joins
x(a) -= edge.weight;
}
else {
assert(a != b && b < n);
//We gain when the first vertex joins A and lose when the second joins
if(index_a < index_b) {
x(a) += edge.weight;
} else {
x(a) -= edge.weight;
}
}
}
}
*/
//Iterate over source vertex edges
for(auto edge : adj_out[n]) {
if(edge.index != n+1) x(edge.index) -= edge.weight;
}
}
};
template<class DT>
class SlowMinCut : public SubmodularFunction<DT> {
public:
int64_t n;
std::vector<std::vector<Edge<DT>>> adj_in;
std::vector<std::vector<Edge<DT>>> adj_out;
DT baseline;
SlowMinCut(const MinCut<DT>& other) : SubmodularFunction<DT>(other.n), n(other.n), adj_in(other.adj_in), adj_out(other.adj_out), baseline(other.baseline) { }
SlowMinCut(int64_t n_in) : SubmodularFunction<DT>(n_in), n(n_in)
{
MinCut<DT> other(n);
adj_in = other.adj_in;
adj_out = other.adj_out;
baseline = other.baseline;
}
DT eval(const std::vector<bool>& A) override
{
DT val = 0.0;
for(int64_t i = 0; i < n; i++) {
if(!A[i]) continue;
for(auto b : adj_out[i]) {
if(b.index == n+1 || !A[b.index])
val += b.weight;
//if(!A[b.index])
}
}
for(auto b : adj_out[n]) {
if(b.index == n+1 || !A[b.index])
val += b.weight;
}
return val - baseline;
}
DT gain(std::vector<bool>& A, DT, int64_t b) override
{
//Gain from adding b
DT gain = 0.0;
for(uint64_t i = 0; i < adj_out[b].size(); i++) {
if(!A[adj_out[b][i].index])
gain += adj_out[b][i].weight;
}
//Loss from adding b
DT loss = 0.0;
for(uint64_t i = 0; i < adj_in[b].size(); i++) {
if(adj_in[b][i].index == n || A[adj_in[b][i].index])
loss -= adj_in[b][i].weight;
}
return gain + loss;
}
/*
void gains(const std::vector<int64_t>& perm, Vector<DT>& x) override
{
_Pragma("omp parallel")
{
#ifdef _OPENMP
int64_t t_id = omp_get_thread_num();
int64_t nt = omp_get_num_threads();
#else
int64_t t_id = 0;
int64_t nt = 1;
#endif
int64_t n_per_thread = (n - 1) / nt + 1;
int64_t start = n_per_thread * t_id;
int64_t end = std::min(start + n_per_thread, n);
//Each thread must maintain its own set A
std::vector<bool> A(perm.size());
std::fill(A.begin(), A.end(), 0);
for(int64_t i = 0; i < std::min(start, n); i++) A[perm[i]] = 1;
DT F_A = 0.0;
for(int64_t j = start; j < end; j++) {
int64_t b = perm[j];
x(b) = gain(A, F_A, b);
A[b] = 1;
F_A += x(b);
}
}
} */
};
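// Standalone sketch (assumed simplification, not a drop-in for the eval()
// methods above): a min-cut objective measures the total weight of directed
// edges that cross from the source side of a partition to the sink side.
// For an explicit edge list that is the loop below; the classes above
// additionally keep the implicit source/sink nodes at indices n and n+1 and
// subtract a baseline so that the empty set always evaluates to 0.
inline double cut_weight_sketch(const int* edge_from, const int* edge_to,
                                const double* edge_weight, int num_edges,
                                const unsigned char* on_source_side)
{
    double cut = 0.0;
    for (int e = 0; e < num_edges; e++) {
        // count an edge only when it leaves the source side
        if (on_source_side[edge_from[e]] && !on_source_side[edge_to[e]])
            cut += edge_weight[e];
    }
    return cut;
}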
#endif
|
rose_true_l2.c
|
/*
* Outer loop: no dependence:
* Inner loop: loop-carried dependence
*
* final dependence graph:
* dep SgExprStatement:(a[i])[j] =(((a[i])[j - 1]) + 1);
* SgExprStatement:(a[i])[j] =(((a[i])[j - 1]) + 1);
* 2*2TRUE_DEP; commonlevel = 2 +precise CarryLevel = 1
* SgPntrArrRefExp:(a[i])[j]
* SgPntrArrRefExp:((a[i])[j - 1]) == 0;* 0;||* 0;== -1;||::
*/
#include "omp.h"
int i;
int j;
int a[100][100];
void foo()
{
#pragma omp parallel for private (i,j)
for (i = 1; i <= 99; i += 1) {
for (j = 1; j <= 99; j += 1) {
a[i][j] = a[i][j - 1] + 1;
}
}
}
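/* Illustrative serial reference (assumed, not part of the original test):
 * because the carried dependence is only on j (a[i][j] reads a[i][j-1]),
 * each row is an independent prefix recurrence.  That is why parallelizing
 * the outer i loop above is legal, while parallelizing the inner j loop
 * would not be. */
void foo_serial_reference()
{
  int ii, jj;
  for (ii = 1; ii <= 99; ii += 1) {
    for (jj = 1; jj <= 99; jj += 1) {
      a[ii][jj] = a[ii][jj - 1] + 1;
    }
  }
}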
|
test.c
|
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define HOST_MAX_TEAMS 128
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
int fail = 0;
INIT();
//
// Test: num_teams and omp_get_team_num()
//
ZERO(A);
int num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(num_teams)
{
A[omp_get_team_num()] += omp_get_team_num();
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: thread_limit and omp_get_thread_num()
//
ZERO(A);
fail = 0;
int num_threads = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(1) thread_limit(num_threads)
#pragma omp parallel
{
int tid = omp_get_thread_num();
A[tid] += (double) tid;
}
}
for (int i = 0 ; i < num_threads ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: if statement in teams region
//
ZERO(A);
fail = 0;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(num_teams)
{
if (omp_get_team_num() % 2 == 0) {
int teid = omp_get_team_num();
A[teid] += (double) 1;
}
else {
int teid = omp_get_team_num();
A[teid] += (double) 2;
}
}
}
for (int i = 0 ; i < num_teams ; i++) {
if (i % 2 == 0) {
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
} else
if (A[i] != 2*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) 2*TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
/* // */
/* // Test: num_teams and thread_limit by simulating a distribute pragma */
/* // */
/* ZERO(A); */
/* fail = 0; */
/* for (int t = 0 ; t < TRIALS ; t++) { */
/* #pragma omp target */
/* #pragma omp teams num_teams(2) thread_limit(496) */
/* { */
/* if (omp_get_team_num() == 0) { */
/* #pragma omp parallel */
/* { */
/* A[omp_get_team_num()*496+omp_get_thread_num()] += omp_get_thread_num(); */
/* if(omp_get_thread_num() == 498) printf("teid = %d, tid = %d, accessing %d\n", omp_get_team_num(), omp_get_thread_num(), omp_get_team_num()*496+omp_get_thread_num()); */
/* } */
/* } else { */
/* #pragma omp parallel */
/* { */
/* if(omp_get_thread_num() == 0) */
/* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */
/* A[omp_get_team_num()*496+omp_get_thread_num()] -= omp_get_thread_num(); */
/* if(omp_get_thread_num() == 0) */
/* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */
/* } */
/* } */
/* } */
/* } */
/* for (int i = 0 ; i < 992 ; i++) { */
/* if (i < 496) { */
/* if (A[i] != i*TRIALS) { */
/* printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); */
/* fail = 1; */
/* } */
/* } else if(i >= 496) */
/* if (A[i] != -((i-496)*TRIALS)) { */
/* printf("Error at %d, h = %lf, d = %lf\n", i, (double) -((i-496)*TRIALS), A[i]); */
/* fail = 1; */
/* } */
/* } */
/* if(fail) printf("Failed\n"); */
/* else printf("Succeeded\n"); */
//
// Test: private
//
ZERO(A);
fail = 0;
int a = 10;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(num_teams) private(a)
{
a = omp_get_team_num();
A[omp_get_team_num()] += a;
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
ZERO(A);
fail = 0;
a = 10;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target firstprivate(a)
#pragma omp teams num_teams(num_teams) firstprivate(a)
{
a += omp_get_team_num();
A[omp_get_team_num()] += a;
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != 10+i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
ZERO(A);
fail = 0;
a = 10;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target // a is implicitly captured as a firstprivate
#pragma omp teams num_teams(num_teams) firstprivate(a)
{
a += omp_get_team_num();
A[omp_get_team_num()] += a;
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != 10+i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
ZERO(A);
fail = 0;
a = 10;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target firstprivate(a)
#pragma omp teams num_teams(num_teams) private(a)
{
a = omp_get_team_num();
A[omp_get_team_num()] += a;
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) (i*TRIALS), A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
return 0;
}
|
tree-ssa-loop-ivcanon.c
|
/* Induction variable canonicalization and loop peeling.
Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This pass detects the loops that iterate a constant number of times,
adds a canonical induction variable (step -1, tested against 0)
and replaces the exit test. This enables the less powerful rtl
level analysis to use this information.
This might spoil the code in some cases (by increasing register pressure).
Note that in the case the new variable is not needed, ivopts will get rid
of it, so it might only be a problem when there are no other linear induction
variables. In that case the created optimization possibilities are likely
to pay off.
We also perform
- complete unrolling (or peeling) when the loop is rolling few enough
times
- simple peeling (i.e. copying a few initial iterations prior to the loop)
when the number-of-iterations estimate is known (typically from the profile
info). */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "profile.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-inline.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"
/* Specifies types of loops that may be unrolled. */
enum unroll_level
{
UL_SINGLE_ITER, /* Only loops that exit immediately in the first
iteration. */
UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase
of code size. */
UL_ALL /* All suitable loops. */
};
/* Adds a canonical induction variable to LOOP iterating NITER times. EXIT
is the exit edge whose condition is replaced. The ssa versions of the new
IV before and after increment will be stored in VAR_BEFORE and VAR_AFTER
if they are not NULL. */
void
create_canonical_iv (struct loop *loop, edge exit, tree niter,
tree *var_before = NULL, tree *var_after = NULL)
{
edge in;
tree type, var;
gcond *cond;
gimple_stmt_iterator incr_at;
enum tree_code cmp;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
print_generic_expr (dump_file, niter, TDF_SLIM);
fprintf (dump_file, " iterations.\n");
}
cond = as_a <gcond *> (last_stmt (exit->src));
in = EDGE_SUCC (exit->src, 0);
if (in == exit)
in = EDGE_SUCC (exit->src, 1);
/* Note that we do not need to worry about overflows, since
type of niter is always unsigned and all comparisons are
just for equality/nonequality -- i.e. everything works
with a modulo arithmetics. */
type = TREE_TYPE (niter);
niter = fold_build2 (PLUS_EXPR, type,
niter,
build_int_cst (type, 1));
incr_at = gsi_last_bb (in->src);
create_iv (niter,
build_int_cst (type, -1),
NULL_TREE, loop,
&incr_at, false, var_before, &var);
if (var_after)
*var_after = var;
cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
gimple_cond_set_code (cond, cmp);
gimple_cond_set_lhs (cond, var);
gimple_cond_set_rhs (cond, build_int_cst (type, 0));
update_stmt (cond);
}
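/* Source-level illustration (assumed example, not part of GCC): the two
   helpers below compute the same sum, and the second has roughly the shape
   produced by create_canonical_iv: an extra IV that starts at the iteration
   count, steps by -1, and drives an equality-against-zero exit test, while
   the original IV is left for ivopts to clean up if it becomes unused.  */
static inline int
canonical_iv_sketch_before (const int *a, unsigned int n)
{
  int sum = 0;
  for (unsigned int i = 0; i < n; i++)
    sum += a[i];
  return sum;
}
static inline int
canonical_iv_sketch_after (const int *a, unsigned int n)
{
  int sum = 0;
  if (n == 0)
    return 0;
  unsigned int i = 0;
  unsigned int iv = n;		/* plays the role of NITER + 1 */
  do
    {
      sum += a[i];
      i++;
      iv -= 1;
    }
  while (iv != 0);		/* exit test replaced by equality against zero */
  return sum;
}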
/* Describe size of loop as detected by tree_estimate_loop_size. */
struct loop_size
{
/* Number of instructions in the loop. */
int overall;
/* Number of instructions that will be likely optimized out in
peeled iterations of loop (i.e. computation based on induction
variable where induction variable starts at known constant.) */
int eliminated_by_peeling;
/* Same statistics for last iteration of loop: it is smaller because
instructions after exit are not executed. */
int last_iteration;
int last_iteration_eliminated_by_peeling;
/* If some IV computation will become constant. */
bool constant_iv;
/* Number of call stmts that are not a builtin and are pure or const
present on the hot path. */
int num_pure_calls_on_hot_path;
/* Number of call stmts that are not a builtin and are not pure nor const
present on the hot path. */
int num_non_pure_calls_on_hot_path;
/* Number of statements other than calls in the loop. */
int non_call_stmts_on_hot_path;
/* Number of branches seen on the hot path. */
int num_branches_on_hot_path;
};
/* Return true if OP in STMT will be constant after peeling LOOP. */
static bool
constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
{
if (is_gimple_min_invariant (op))
return true;
/* We can still fold accesses to constant arrays when index is known. */
if (TREE_CODE (op) != SSA_NAME)
{
tree base = op;
/* First make fast look if we see constant array inside. */
while (handled_component_p (base))
base = TREE_OPERAND (base, 0);
if ((DECL_P (base)
&& ctor_for_folding (base) != error_mark_node)
|| CONSTANT_CLASS_P (base))
{
/* If so, see if we understand all the indices. */
base = op;
while (handled_component_p (base))
{
if (TREE_CODE (base) == ARRAY_REF
&& !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
return false;
base = TREE_OPERAND (base, 0);
}
return true;
}
return false;
}
/* Induction variables are constants when defined in loop. */
if (loop_containing_stmt (stmt) != loop)
return false;
tree ev = analyze_scalar_evolution (loop, op);
if (chrec_contains_undetermined (ev)
|| chrec_contains_symbols (ev))
return false;
return true;
}
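/* Hand-written example of the constant-array case handled above:

     static const int tbl[4] = { 1, 2, 3, 4 };
     for (i = 0; i < 4; i++)
       sum += tbl[i];

   tbl[i] is not a gimple invariant inside the loop, but once the loop is
   completely peeled every copy uses a known constant index, so each load
   folds to one of the constants 1..4. */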
/* Computes an estimated number of insns in LOOP.
EXIT (if non-NULL) is an exit edge that will be eliminated in all but the last
iteration of the loop.
EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last iteration
of the loop.
Return results in SIZE, estimate benefits for complete unrolling exiting by EXIT.
Stop estimating after UPPER_BOUND is met. Return true in this case. */
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
struct loop_size *size, int upper_bound)
{
basic_block *body = get_loop_body (loop);
gimple_stmt_iterator gsi;
unsigned int i;
bool after_exit;
vec<basic_block> path = get_loop_hot_path (loop);
size->overall = 0;
size->eliminated_by_peeling = 0;
size->last_iteration = 0;
size->last_iteration_eliminated_by_peeling = 0;
size->num_pure_calls_on_hot_path = 0;
size->num_non_pure_calls_on_hot_path = 0;
size->non_call_stmts_on_hot_path = 0;
size->num_branches_on_hot_path = 0;
size->constant_iv = 0;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
for (i = 0; i < loop->num_nodes; i++)
{
if (edge_to_cancel && body[i] != edge_to_cancel->src
&& dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
after_exit = true;
else
after_exit = false;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
after_exit);
for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
int num = estimate_num_insns (stmt, &eni_size_weights);
bool likely_eliminated = false;
bool likely_eliminated_last = false;
bool likely_eliminated_peeled = false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " size: %3i ", num);
print_gimple_stmt (dump_file, gsi_stmt (gsi), 0);
}
/* Look for reasons why we might optimize this stmt away. */
if (!gimple_has_side_effects (stmt))
{
/* Exit conditional. */
if (exit && body[i] == exit->src
&& stmt == last_stmt (exit->src))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Exit condition will be eliminated "
"in peeled copies.\n");
likely_eliminated_peeled = true;
}
if (edge_to_cancel && body[i] == edge_to_cancel->src
&& stmt == last_stmt (edge_to_cancel->src))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Exit condition will be eliminated "
"in last copy.\n");
likely_eliminated_last = true;
}
/* Sets of IV variables */
if (gimple_code (stmt) == GIMPLE_ASSIGN
&& constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Induction variable computation will"
" be folded away.\n");
likely_eliminated = true;
}
/* Assignments of IV variables. */
else if (gimple_code (stmt) == GIMPLE_ASSIGN
&& TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
&& constant_after_peeling (gimple_assign_rhs1 (stmt),
stmt, loop)
&& (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
|| constant_after_peeling (gimple_assign_rhs2 (stmt),
stmt, loop)))
{
size->constant_iv = true;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" Constant expression will be folded away.\n");
likely_eliminated = true;
}
/* Conditionals. */
else if ((gimple_code (stmt) == GIMPLE_COND
&& constant_after_peeling (gimple_cond_lhs (stmt), stmt,
loop)
&& constant_after_peeling (gimple_cond_rhs (stmt), stmt,
loop)
/* We don't simplify all constant compares so make sure
they are not both constant already. See PR70288. */
&& (! is_gimple_min_invariant (gimple_cond_lhs (stmt))
|| ! is_gimple_min_invariant
(gimple_cond_rhs (stmt))))
|| (gimple_code (stmt) == GIMPLE_SWITCH
&& constant_after_peeling (gimple_switch_index (
as_a <gswitch *>
(stmt)),
stmt, loop)
&& ! is_gimple_min_invariant
(gimple_switch_index
(as_a <gswitch *> (stmt)))))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Constant conditional.\n");
likely_eliminated = true;
}
}
size->overall += num;
if (likely_eliminated || likely_eliminated_peeled)
size->eliminated_by_peeling += num;
if (!after_exit)
{
size->last_iteration += num;
if (likely_eliminated || likely_eliminated_last)
size->last_iteration_eliminated_by_peeling += num;
}
if ((size->overall * 3 / 2 - size->eliminated_by_peeling
- size->last_iteration_eliminated_by_peeling) > upper_bound)
{
free (body);
path.release ();
return true;
}
}
}
while (path.length ())
{
basic_block bb = path.pop ();
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (gimple_code (stmt) == GIMPLE_CALL
&& !gimple_inexpensive_call_p (as_a <gcall *> (stmt)))
{
int flags = gimple_call_flags (stmt);
if (flags & (ECF_PURE | ECF_CONST))
size->num_pure_calls_on_hot_path++;
else
size->num_non_pure_calls_on_hot_path++;
size->num_branches_on_hot_path ++;
}
/* Count inexpensive calls as non-calls, because they will likely
expand inline. */
else if (gimple_code (stmt) != GIMPLE_DEBUG)
size->non_call_stmts_on_hot_path++;
if (((gimple_code (stmt) == GIMPLE_COND
&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
|| !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
loop)))
|| (gimple_code (stmt) == GIMPLE_SWITCH
&& !constant_after_peeling (gimple_switch_index (
as_a <gswitch *> (stmt)),
stmt, loop)))
&& (!exit || bb != exit->src))
size->num_branches_on_hot_path++;
}
}
path.release ();
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
size->eliminated_by_peeling, size->last_iteration,
size->last_iteration_eliminated_by_peeling);
free (body);
return false;
}
/* Estimate number of insns of completely unrolled loop.
It is (NUNROLL + 1) * size of loop body, taking into account
the fact that in the last copy everything after the exit conditional
is dead and that some instructions will be eliminated after
peeling.
The loop body is likely going to simplify further; this is difficult
to guess, so we just decrease the result by 1/3. */
static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
unsigned HOST_WIDE_INT nunroll)
{
HOST_WIDE_INT unr_insns = ((nunroll)
* (HOST_WIDE_INT) (size->overall
- size->eliminated_by_peeling));
if (!nunroll)
unr_insns = 0;
unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;
unr_insns = unr_insns * 2 / 3;
if (unr_insns <= 0)
unr_insns = 1;
return unr_insns;
}
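/* Worked example with invented numbers: for size->overall = 10,
   size->eliminated_by_peeling = 4, size->last_iteration = 8,
   size->last_iteration_eliminated_by_peeling = 5 and nunroll = 3 the
   estimate is 3 * (10 - 4) + (8 - 5) = 21 insns, reduced by one third
   to 21 * 2 / 3 = 14. */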
/* Loop LOOP is known to not loop. See if there is an edge in the loop
body that can be removed to make the loop always exit while, at
the same time, not making any code potentially executed
during the last iteration dead.
After complete unrolling we still may get rid of the conditional
on the exit in the last copy even if we have no idea what it does.
This is quite a common case for loops of the form
int a[5];
for (i=0;i<b;i++)
a[i]=0;
Here we prove the loop to iterate 5 times but we do not know
it from the induction variable.
For now we handle only the simple case where there is an exit condition
just before the latch block and the latch block contains no statements
with side effects that may otherwise terminate the execution of the loop
(such as by EH or by terminating the program or longjmp).
In the general case we may want to cancel the paths leading to statements
that loop-niter identified as having undefined effect in the last iteration.
The other cases are hopefully rare and will be cleaned up later. */
static edge
loop_edge_to_cancel (struct loop *loop)
{
vec<edge> exits;
unsigned i;
edge edge_to_cancel;
gimple_stmt_iterator gsi;
/* We want only one predecessor of the loop latch. */
if (EDGE_COUNT (loop->latch->preds) > 1)
return NULL;
exits = get_loop_exit_edges (loop);
FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
{
/* Find the edge other than the loop exit
leaving the conditional. */
if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
continue;
if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
else
edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);
/* We can only handle conditionals. */
if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
continue;
/* We should never have conditionals in the loop latch. */
gcc_assert (edge_to_cancel->dest != loop->header);
/* Check that it leads to loop latch. */
if (edge_to_cancel->dest != loop->latch)
continue;
exits.release ();
/* Verify that the code in loop latch does nothing that may end program
execution without really reaching the exit. This may include
non-pure/const function calls, EH statements, volatile ASMs etc. */
for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
if (gimple_has_side_effects (gsi_stmt (gsi)))
return NULL;
return edge_to_cancel;
}
exits.release ();
return NULL;
}
/* Remove all tests for exits that are known to be taken after LOOP was
peeled NPEELED times. Put gcc_unreachable before every statement
known to not be executed. */
static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
struct nb_iter_bound *elt;
bool changed = false;
for (elt = loop->bounds; elt; elt = elt->next)
{
/* If statement is known to be undefined after peeling, turn it
into unreachable (or trap when debugging experience is supposed
to be good). */
if (!elt->is_exit
&& wi::ltu_p (elt->bound, npeeled))
{
gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
gcall *stmt = gimple_build_call
(builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
gimple_set_location (stmt, gimple_location (elt->stmt));
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
split_block (gimple_bb (stmt), stmt);
changed = true;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Forced statement unreachable: ");
print_gimple_stmt (dump_file, elt->stmt, 0);
}
}
/* If we know the exit will be taken after peeling, update. */
else if (elt->is_exit
&& wi::leu_p (elt->bound, npeeled))
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Forced exit to be taken: ");
print_gimple_stmt (dump_file, elt->stmt, 0);
}
if (!loop_exit_edge_p (loop, exit_edge))
exit_edge = EDGE_SUCC (bb, 1);
exit_edge->probability = profile_probability::always ();
gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
gcond *cond_stmt = as_a <gcond *> (elt->stmt);
if (exit_edge->flags & EDGE_TRUE_VALUE)
gimple_cond_make_true (cond_stmt);
else
gimple_cond_make_false (cond_stmt);
update_stmt (cond_stmt);
changed = true;
}
}
return changed;
}
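/* Rough example of both cases above: after peeling

     int a[5];
     for (i = 0; i < n; i++)
       a[i] = 0;

   five times, a recorded bound telling us that another execution of the
   store would be undefined makes us insert __builtin_unreachable () before
   it, while an exit condition whose bound says it must have fired within
   the peeled copies is simply folded to a constant. */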
/* Remove all exits that are known to be never taken because of the loop bound
discovered. */
static bool
remove_redundant_iv_tests (struct loop *loop)
{
struct nb_iter_bound *elt;
bool changed = false;
if (!loop->any_upper_bound)
return false;
for (elt = loop->bounds; elt; elt = elt->next)
{
/* The exit is pointless if it won't be taken before the loop reaches
its upper bound. */
if (elt->is_exit && loop->any_upper_bound
&& wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
struct tree_niter_desc niter;
if (!loop_exit_edge_p (loop, exit_edge))
exit_edge = EDGE_SUCC (bb, 1);
/* Only when we know the actual number of iterations, not
just a bound, can we remove the exit. */
if (!number_of_iterations_exit (loop, exit_edge,
&niter, false, false)
|| !integer_onep (niter.assumptions)
|| !integer_zerop (niter.may_be_zero)
|| !niter.niter
|| TREE_CODE (niter.niter) != INTEGER_CST
|| !wi::ltu_p (loop->nb_iterations_upper_bound,
wi::to_widest (niter.niter)))
continue;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Removed pointless exit: ");
print_gimple_stmt (dump_file, elt->stmt, 0);
}
gcond *cond_stmt = as_a <gcond *> (elt->stmt);
if (exit_edge->flags & EDGE_TRUE_VALUE)
gimple_cond_make_false (cond_stmt);
else
gimple_cond_make_true (cond_stmt);
update_stmt (cond_stmt);
changed = true;
}
}
return changed;
}
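/* Sketch of the situation handled above: in

     for (i = 0; i < 100; i++)   <- exact trip count known
       if (i > 1000)             <- IV test that can never fire first
         break;

   the inner exit could only trigger after more iterations than the proven
   upper bound of the loop, so its condition is folded to a constant and
   the loop is left with a single exit. */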
/* Stores loops that will be unlooped and edges that will be removed
after we process whole loop tree. */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;
static vec<edge> edges_to_remove;
/* Stores loops that has been peeled. */
static bitmap peeled_loops;
/* Cancel all fully unrolled loops by putting __builtin_unreachable
on the latch edge.
We do it after all unrolling since unlooping moves basic blocks
across loop boundaries, trashing loop closed SSA form as well
as SCEV info that needs to stay intact during unrolling.
IRRED_INVALIDATED is used to bookkeep whether information about
irreducible regions may become invalid as a result
of the transformation.
LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
when we need to go into loop closed SSA form. */
static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
bool *irred_invalidated)
{
while (loops_to_unloop.length ())
{
struct loop *loop = loops_to_unloop.pop ();
int n_unroll = loops_to_unloop_nunroll.pop ();
basic_block latch = loop->latch;
edge latch_edge = loop_latch_edge (loop);
int flags = latch_edge->flags;
location_t locus = latch_edge->goto_locus;
gcall *stmt;
gimple_stmt_iterator gsi;
remove_exits_and_undefined_stmts (loop, n_unroll);
/* Unloop destroys the latch edge. */
unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);
/* Create new basic block for the latch edge destination and wire
it in. */
stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
latch_edge->probability = profile_probability::never ();
latch_edge->flags |= flags;
latch_edge->goto_locus = locus;
add_bb_to_loop (latch_edge->dest, current_loops->tree_root);
latch_edge->dest->count = profile_count::zero ();
set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);
gsi = gsi_start_bb (latch_edge->dest);
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
}
loops_to_unloop.release ();
loops_to_unloop_nunroll.release ();
/* Remove edges in peeled copies. Since remove_path removes dominated
regions, we need to cope with the removal of already removed paths. */
unsigned i;
edge e;
auto_vec<int, 20> src_bbs;
src_bbs.reserve_exact (edges_to_remove.length ());
FOR_EACH_VEC_ELT (edges_to_remove, i, e)
src_bbs.quick_push (e->src->index);
FOR_EACH_VEC_ELT (edges_to_remove, i, e)
if (BASIC_BLOCK_FOR_FN (cfun, src_bbs[i]))
{
bool ok = remove_path (e, irred_invalidated,
loop_closed_ssa_invalidated);
gcc_assert (ok);
}
edges_to_remove.release ();
}
/* Tries to unroll LOOP completely, i.e. NITER times.
UL determines which loops we are allowed to unroll.
EXIT is the exit of the loop that should be eliminated.
MAXITER specifies the bound on the number of iterations, -1 if it is
not known or too large for HOST_WIDE_INT. The location
LOCUS corresponding to the loop is used when emitting
a summary of the unroll to the dump file. */
static bool
try_unroll_loop_completely (struct loop *loop,
edge exit, tree niter, bool may_be_zero,
enum unroll_level ul,
HOST_WIDE_INT maxiter,
location_t locus, bool allow_peel)
{
unsigned HOST_WIDE_INT n_unroll = 0;
bool n_unroll_found = false;
edge edge_to_cancel = NULL;
/* See if we proved the number of iterations to be a low constant.
EXIT is an edge that will be removed in all but the last iteration of
the loop.
EDGE_TO_CANCEL is an edge that will be removed from the last iteration
of the unrolled sequence and is expected to make the final loop not
rolling.
If the number of executions of the loop is determined by a standard
induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
leaving from the iv test. */
if (tree_fits_uhwi_p (niter))
{
n_unroll = tree_to_uhwi (niter);
n_unroll_found = true;
edge_to_cancel = EDGE_SUCC (exit->src, 0);
if (edge_to_cancel == exit)
edge_to_cancel = EDGE_SUCC (exit->src, 1);
}
/* We do not know the number of iterations and thus we can not eliminate
the EXIT edge. */
else
exit = NULL;
/* See if we can improve our estimate by using recorded loop bounds. */
if ((allow_peel || maxiter == 0 || ul == UL_NO_GROWTH)
&& maxiter >= 0
&& (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
{
n_unroll = maxiter;
n_unroll_found = true;
/* The loop terminates before the IV test, so we cannot
remove it in the last iteration. */
edge_to_cancel = NULL;
}
if (!n_unroll_found)
return false;
if (!loop->unroll
&& n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d "
"(--param max-completely-peel-times limit reached).\n",
loop->num);
return false;
}
if (!edge_to_cancel)
edge_to_cancel = loop_edge_to_cancel (loop);
if (n_unroll)
{
if (ul == UL_SINGLE_ITER)
return false;
if (loop->unroll)
{
/* If the unrolling factor is too large, bail out. */
if (n_unroll > (unsigned)loop->unroll)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"Not unrolling loop %d: "
"user didn't want it unrolled completely.\n",
loop->num);
return false;
}
}
else
{
struct loop_size size;
/* EXIT can be removed only if we are sure the loop passes the first
N_UNROLL iterations without taking it. */
bool remove_exit = (exit && niter
&& TREE_CODE (niter) == INTEGER_CST
&& wi::leu_p (n_unroll, wi::to_widest (niter)));
bool large
= tree_estimate_loop_size
(loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
if (large)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
loop->num);
return false;
}
unsigned HOST_WIDE_INT ninsns = size.overall;
unsigned HOST_WIDE_INT unr_insns
= estimated_unrolled_size (&size, n_unroll);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " Loop size: %d\n", (int) ninsns);
fprintf (dump_file, " Estimated size after unrolling: %d\n",
(int) unr_insns);
}
/* If the code is going to shrink, we don't need to be extra
cautious on guessing if the unrolling is going to be
profitable. */
if (unr_insns
/* If there is an IV that will become constant, we
save one instruction in the loop prologue that we do not
account for otherwise. */
<= ninsns + (size.constant_iv != false))
;
/* We unroll only inner loops, because we do not consider it
profitable otherwise. We still can cancel the loopback edge
of a loop that does not roll; this is always a good idea. */
else if (ul == UL_NO_GROWTH)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
loop->num);
return false;
}
/* Outer loops tend to be less interesting candidates for
complete unrolling unless we can do a lot of propagation
into the inner loop body. For now we disable outer loop
unrolling when the code would grow. */
else if (loop->inner)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
"it is not innermost and code would grow.\n",
loop->num);
return false;
}
/* If there is a call on a hot path through the loop, then
there is most probably not much to optimize. */
else if (size.num_non_pure_calls_on_hot_path)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
"contains call and code would grow.\n",
loop->num);
return false;
}
/* If there is a pure/const call in the loop, then we can
still optimize the unrolled loop body if it contains some
interesting code other than the calls and the code storing or
accumulating the return value. */
else if (size.num_pure_calls_on_hot_path
/* One IV increment, one test, one ivtmp store and
one useful stmt. That is about the minimal loop
doing a pure call. */
&& (size.non_call_stmts_on_hot_path
<= 3 + size.num_pure_calls_on_hot_path))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
"contains just pure calls and code would grow.\n",
loop->num);
return false;
}
/* Complete unrolling is a major win when control flow is
removed and one big basic block is created. If the loop
contains control flow the optimization may still be a win
because of eliminating the loop overhead but it also may
blow the branch predictor tables. Limit number of
branches on the hot path through the peeled sequence. */
else if (size.num_branches_on_hot_path * (int)n_unroll
> PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
"number of branches on hot path in the unrolled "
"sequence reaches --param max-peel-branches limit.\n",
loop->num);
return false;
}
else if (unr_insns
> (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Not unrolling loop %d: "
"number of insns in the unrolled sequence reaches "
"--param max-completely-peeled-insns limit.\n",
loop->num);
return false;
}
}
initialize_original_copy_tables ();
auto_sbitmap wont_exit (n_unroll + 1);
if (exit && niter
&& TREE_CODE (niter) == INTEGER_CST
&& wi::leu_p (n_unroll, wi::to_widest (niter)))
{
bitmap_ones (wont_exit);
if (wi::eq_p (wi::to_widest (niter), n_unroll)
|| edge_to_cancel)
bitmap_clear_bit (wont_exit, 0);
}
else
{
exit = NULL;
bitmap_clear (wont_exit);
}
if (may_be_zero)
bitmap_clear_bit (wont_exit, 1);
if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
n_unroll, wont_exit,
exit, &edges_to_remove,
DLTHE_FLAG_UPDATE_FREQ
| DLTHE_FLAG_COMPLETTE_PEEL))
{
free_original_copy_tables ();
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Failed to duplicate the loop\n");
return false;
}
free_original_copy_tables ();
}
/* Remove the conditional from the last copy of the loop. */
if (edge_to_cancel)
{
gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
force_edge_cold (edge_to_cancel, true);
if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
gimple_cond_make_false (cond);
else
gimple_cond_make_true (cond);
update_stmt (cond);
/* Do not remove the path, as doing so may remove outer loop and
confuse bookkeeping code in tree_unroll_loops_completely. */
}
/* Store the loop for later unlooping and exit removal. */
loops_to_unloop.safe_push (loop);
loops_to_unloop_nunroll.safe_push (n_unroll);
if (dump_enabled_p ())
{
if (!n_unroll)
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
"loop turned into non-loop; it never loops\n");
else
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
"loop with %d iterations completely unrolled",
(int) n_unroll);
if (loop->header->count.initialized_p ())
dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
" (header execution count %d)",
(int)loop->header->count.to_gcov_type ());
dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
}
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
if (exit)
fprintf (dump_file, "Exit condition of peeled iterations was "
"eliminated.\n");
if (edge_to_cancel)
fprintf (dump_file, "Last iteration exit edge was proved true.\n");
else
fprintf (dump_file, "Latch of last iteration was marked by "
"__builtin_unreachable ().\n");
}
return true;
}
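/* Net effect on a trivial example (assuming the size heuristics above
   allow it):

     for (i = 0; i < 4; i++)          a[0] = 0;
       a[i] = 0;               ==>    a[1] = 0;
                                      a[2] = 0;
                                      a[3] = 0;

   The exit test disappears from the peeled copies and the emptied loop is
   queued in loops_to_unloop for removal by unloop_loops. */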
/* Return number of instructions after peeling. */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
unsigned HOST_WIDE_INT npeel)
{
return MAX (npeel * (HOST_WIDE_INT) (size->overall
- size->eliminated_by_peeling), 1);
}
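/* E.g. with the invented numbers size->overall = 12,
   size->eliminated_by_peeling = 5 and npeel = 3 the estimate is
   3 * (12 - 5) = 21 insns; the MAX with 1 only matters when everything
   is expected to be optimized away. */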
/* If the loop is expected to iterate N times and is
small enough, duplicate the loop body N+1 times before
the loop itself. This way the hot path will never
enter the loop.
Parameters are the same as for try_unroll_loop_completely. */
static bool
try_peel_loop (struct loop *loop,
edge exit, tree niter, bool may_be_zero,
HOST_WIDE_INT maxiter)
{
HOST_WIDE_INT npeel;
struct loop_size size;
int peeled_size;
if (!flag_peel_loops
|| PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
|| !peeled_loops)
return false;
if (bitmap_bit_p (peeled_loops, loop->num))
{
if (dump_file)
fprintf (dump_file, "Not peeling: loop is already peeled\n");
return false;
}
/* We don't peel loops that will be unrolled as this can duplicate a
loop more times than the user requested. */
if (loop->unroll)
{
if (dump_file)
fprintf (dump_file, "Not peeling: user didn't want it peeled.\n");
return false;
}
/* Peel only innermost loops.
While the code is perfectly capable of peeling non-innermost loops,
the heuristics would probably need some improvements. */
if (loop->inner)
{
if (dump_file)
fprintf (dump_file, "Not peeling: outer loop\n");
return false;
}
if (!optimize_loop_for_speed_p (loop))
{
if (dump_file)
fprintf (dump_file, "Not peeling: cold loop\n");
return false;
}
/* Check if there is an estimate on the number of iterations. */
npeel = estimated_loop_iterations_int (loop);
if (npeel < 0)
npeel = likely_max_loop_iterations_int (loop);
if (npeel < 0)
{
if (dump_file)
fprintf (dump_file, "Not peeling: number of iterations is not "
"estimated\n");
return false;
}
if (maxiter >= 0 && maxiter <= npeel)
{
if (dump_file)
fprintf (dump_file, "Not peeling: upper bound is known so can "
"unroll completely\n");
return false;
}
/* We want to peel estimated number of iterations + 1 (so we never
enter the loop on quick path). Check against PARAM_MAX_PEEL_TIMES
and be sure to avoid overflows. */
if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
{
if (dump_file)
fprintf (dump_file, "Not peeling: rolls too much "
"(%i + 1 > --param max-peel-times)\n", (int) npeel);
return false;
}
npeel++;
/* Check peeled loops size. */
tree_estimate_loop_size (loop, exit, NULL, &size,
PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
> PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
{
if (dump_file)
fprintf (dump_file, "Not peeling: peeled sequence size is too large "
"(%i insns > --param max-peel-insns)", peeled_size);
return false;
}
/* Duplicate possibly eliminating the exits. */
initialize_original_copy_tables ();
auto_sbitmap wont_exit (npeel + 1);
if (exit && niter
&& TREE_CODE (niter) == INTEGER_CST
&& wi::leu_p (npeel, wi::to_widest (niter)))
{
bitmap_ones (wont_exit);
bitmap_clear_bit (wont_exit, 0);
}
else
{
exit = NULL;
bitmap_clear (wont_exit);
}
if (may_be_zero)
bitmap_clear_bit (wont_exit, 1);
if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
npeel, wont_exit,
exit, &edges_to_remove,
DLTHE_FLAG_UPDATE_FREQ))
{
free_original_copy_tables ();
return false;
}
free_original_copy_tables ();
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Peeled loop %d, %i times.\n",
loop->num, (int) npeel);
}
if (loop->any_estimate)
{
if (wi::ltu_p (npeel, loop->nb_iterations_estimate))
loop->nb_iterations_estimate -= npeel;
else
loop->nb_iterations_estimate = 0;
}
if (loop->any_upper_bound)
{
if (wi::ltu_p (npeel, loop->nb_iterations_upper_bound))
loop->nb_iterations_upper_bound -= npeel;
else
loop->nb_iterations_upper_bound = 0;
}
if (loop->any_likely_upper_bound)
{
if (wi::ltu_p (npeel, loop->nb_iterations_likely_upper_bound))
loop->nb_iterations_likely_upper_bound -= npeel;
else
{
loop->any_estimate = true;
loop->nb_iterations_estimate = 0;
loop->nb_iterations_likely_upper_bound = 0;
}
}
profile_count entry_count = profile_count::zero ();
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, loop->header->preds)
if (e->src != loop->latch)
{
if (e->src->count.initialized_p ())
entry_count = e->src->count + e->src->count;
gcc_assert (!flow_bb_inside_loop_p (loop, e->src));
}
profile_probability p = profile_probability::very_unlikely ();
p = entry_count.probability_in (loop->header->count);
scale_loop_profile (loop, p, 0);
bitmap_set_bit (peeled_loops, loop->num);
return true;
}
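/* Shape of the transformation, assuming an estimated two iterations and
   therefore npeel = 3 (hand-written sketch):

     while (c) body;    ==>   if (!c) goto done; body;
                              if (!c) goto done; body;
                              if (!c) goto done; body;
                              while (c) body;      (now expected cold)
                            done:;

   The remaining loop keeps the rare long-running case correct while the
   expected executions run through the straight-line copies. */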
/* Adds a canonical induction variable to LOOP if suitable.
CREATE_IV is true if we may create a new iv. UL determines
which loops we are allowed to completely unroll. If TRY_EVAL is true, we try
to determine the number of iterations of a loop by direct evaluation.
Returns true if cfg is changed. */
static bool
canonicalize_loop_induction_variables (struct loop *loop,
bool create_iv, enum unroll_level ul,
bool try_eval, bool allow_peel)
{
edge exit = NULL;
tree niter;
HOST_WIDE_INT maxiter;
bool modified = false;
location_t locus = UNKNOWN_LOCATION;
struct tree_niter_desc niter_desc;
bool may_be_zero = false;
/* For unrolling allow conditional constant or zero iterations, thus
perform loop-header copying on-the-fly. */
exit = single_exit (loop);
niter = chrec_dont_know;
if (exit && number_of_iterations_exit (loop, exit, &niter_desc, false))
{
niter = niter_desc.niter;
may_be_zero
= niter_desc.may_be_zero && !integer_zerop (niter_desc.may_be_zero);
}
if (TREE_CODE (niter) == INTEGER_CST)
locus = gimple_location (last_stmt (exit->src));
else
{
/* For non-constant niter fold may_be_zero into niter again. */
if (may_be_zero)
{
if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
niter = fold_build3 (COND_EXPR, TREE_TYPE (niter),
niter_desc.may_be_zero,
build_int_cst (TREE_TYPE (niter), 0), niter);
else
niter = chrec_dont_know;
may_be_zero = false;
}
/* If the loop has more than one exit, try checking all of them
for # of iterations determinable through scev. */
if (!exit)
niter = find_loop_niter (loop, &exit);
/* Finally if everything else fails, try brute force evaluation. */
if (try_eval
&& (chrec_contains_undetermined (niter)
|| TREE_CODE (niter) != INTEGER_CST))
niter = find_loop_niter_by_eval (loop, &exit);
if (exit)
locus = gimple_location (last_stmt (exit->src));
if (TREE_CODE (niter) != INTEGER_CST)
exit = NULL;
}
/* We work exceptionally hard here to estimate the bound
by find_loop_niter_by_eval. Be sure to keep it for the future. */
if (niter && TREE_CODE (niter) == INTEGER_CST)
{
record_niter_bound (loop, wi::to_widest (niter),
exit == single_likely_exit (loop), true);
}
/* Force re-computation of loop bounds so we can remove redundant exits. */
maxiter = max_loop_iterations_int (loop);
if (dump_file && (dump_flags & TDF_DETAILS)
&& TREE_CODE (niter) == INTEGER_CST)
{
fprintf (dump_file, "Loop %d iterates ", loop->num);
print_generic_expr (dump_file, niter, TDF_SLIM);
fprintf (dump_file, " times.\n");
}
if (dump_file && (dump_flags & TDF_DETAILS)
&& maxiter >= 0)
{
fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
(int)maxiter);
}
if (dump_file && (dump_flags & TDF_DETAILS)
&& likely_max_loop_iterations_int (loop) >= 0)
{
fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
loop->num, (int)likely_max_loop_iterations_int (loop));
}
/* Remove exits that are known to be never taken based on the loop bound.
Needs to be called after the computation of max_loop_iterations_int above,
which populates the loop bounds. */
modified |= remove_redundant_iv_tests (loop);
if (try_unroll_loop_completely (loop, exit, niter, may_be_zero, ul,
maxiter, locus, allow_peel))
return true;
if (create_iv
&& niter && !chrec_contains_undetermined (niter)
&& exit && just_once_each_iteration_p (loop, exit->src))
{
tree iv_niter = niter;
if (may_be_zero)
{
if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
iv_niter = fold_build3 (COND_EXPR, TREE_TYPE (iv_niter),
niter_desc.may_be_zero,
build_int_cst (TREE_TYPE (iv_niter), 0),
iv_niter);
else
iv_niter = NULL_TREE;
}
if (iv_niter)
create_canonical_iv (loop, exit, iv_niter);
}
if (ul == UL_ALL)
modified |= try_peel_loop (loop, exit, niter, may_be_zero, maxiter);
return modified;
}
/* The main entry point of the pass. Adds canonical induction variables
to the suitable loops. */
unsigned int
canonicalize_induction_variables (void)
{
struct loop *loop;
bool changed = false;
bool irred_invalidated = false;
bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
estimate_numbers_of_iterations (cfun);
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
changed |= canonicalize_loop_induction_variables (loop,
true, UL_SINGLE_ITER,
true, false);
}
gcc_assert (!need_ssa_update_p (cfun));
unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
if (irred_invalidated
&& loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
mark_irreducible_loops ();
/* Clean up the information about numbers of iterations, since brute force
evaluation could reveal new information. */
free_numbers_of_iterations_estimates (cfun);
scev_reset ();
if (!bitmap_empty_p (loop_closed_ssa_invalidated))
{
gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
}
BITMAP_FREE (loop_closed_ssa_invalidated);
if (changed)
return TODO_cleanup_cfg;
return 0;
}
/* Propagate constant SSA_NAMEs defined in basic block BB. */
static void
propagate_constants_for_unrolling (basic_block bb)
{
/* Look for degenerate PHI nodes with constant argument. */
for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
{
gphi *phi = gsi.phi ();
tree result = gimple_phi_result (phi);
tree arg = gimple_phi_arg_def (phi, 0);
if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result)
&& gimple_phi_num_args (phi) == 1
&& CONSTANT_CLASS_P (arg))
{
replace_uses_by (result, arg);
gsi_remove (&gsi, true);
release_ssa_name (result);
}
else
gsi_next (&gsi);
}
/* Look for assignments to SSA names with constant RHS. */
for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
gimple *stmt = gsi_stmt (gsi);
tree lhs;
if (is_gimple_assign (stmt)
&& TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_constant
&& (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
&& !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
{
replace_uses_by (lhs, gimple_assign_rhs1 (stmt));
gsi_remove (&gsi, true);
release_ssa_name (lhs);
}
else
gsi_next (&gsi);
}
}
/* Process loops from innermost to outer, stopping at the innermost
loop we unrolled. */
static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
bitmap father_bbs, struct loop *loop)
{
struct loop *loop_father;
bool changed = false;
struct loop *inner;
enum unroll_level ul;
unsigned num = number_of_loops (cfun);
/* Process inner loops first. Don't walk loops added by the recursive
calls because SSA form is not up-to-date. They can be handled in the
next iteration. */
for (inner = loop->inner; inner != NULL; inner = inner->next)
if ((unsigned) inner->num < num)
changed |= tree_unroll_loops_completely_1 (may_increase_size,
unroll_outer, father_bbs,
inner);
/* If we changed an inner loop we cannot process outer loops in this
iteration because SSA form is not up-to-date. Continue with
siblings of outer loops instead. */
if (changed)
return true;
/* Don't unroll #pragma omp simd loops until the vectorizer
attempts to vectorize those. */
if (loop->force_vectorize)
return false;
/* Try to unroll this loop. */
loop_father = loop_outer (loop);
if (!loop_father)
return false;
if (loop->unroll > 1)
ul = UL_ALL;
else if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
/* Unroll outermost loops only if asked to do so or they do
not cause code growth. */
&& (unroll_outer || loop_outer (loop_father)))
ul = UL_ALL;
else
ul = UL_NO_GROWTH;
if (canonicalize_loop_induction_variables
(loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer))
{
/* If we'll continue unrolling, we need to propagate constants
within the new basic blocks to fold away induction variable
computations; otherwise, the size might blow up before the
iteration is complete and the IR eventually cleaned up. */
if (loop_outer (loop_father))
bitmap_set_bit (father_bbs, loop_father->header->index);
return true;
}
return false;
}
/* Unroll LOOPS completely if they iterate just a few times. Unless
MAY_INCREASE_SIZE is true, perform the unrolling only if the
size of the code does not increase. */
static unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
bitmap father_bbs = BITMAP_ALLOC (NULL);
bool changed;
int iteration = 0;
bool irred_invalidated = false;
estimate_numbers_of_iterations (cfun);
do
{
changed = false;
bitmap loop_closed_ssa_invalidated = NULL;
if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
free_numbers_of_iterations_estimates (cfun);
estimate_numbers_of_iterations (cfun);
changed = tree_unroll_loops_completely_1 (may_increase_size,
unroll_outer, father_bbs,
current_loops->tree_root);
if (changed)
{
unsigned i;
unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
/* We can not use TODO_update_ssa_no_phi because VOPS gets confused. */
if (loop_closed_ssa_invalidated
&& !bitmap_empty_p (loop_closed_ssa_invalidated))
rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
TODO_update_ssa);
else
update_ssa (TODO_update_ssa);
/* father_bbs is a bitmap of loop father header BB indices.
Translate that to what non-root loops these BBs belong to now. */
bitmap_iterator bi;
bitmap fathers = BITMAP_ALLOC (NULL);
EXECUTE_IF_SET_IN_BITMAP (father_bbs, 0, i, bi)
{
basic_block unrolled_loop_bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (! unrolled_loop_bb)
continue;
if (loop_outer (unrolled_loop_bb->loop_father))
bitmap_set_bit (fathers,
unrolled_loop_bb->loop_father->num);
}
bitmap_clear (father_bbs);
/* Propagate the constants within the new basic blocks. */
EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi)
{
loop_p father = get_loop (cfun, i);
basic_block *body = get_loop_body_in_dom_order (father);
for (unsigned j = 0; j < father->num_nodes; j++)
propagate_constants_for_unrolling (body[j]);
free (body);
}
BITMAP_FREE (fathers);
/* This will take care of removing completely unrolled loops
from the loop structures so we can continue unrolling the loops
that are now innermost. */
if (cleanup_tree_cfg ())
update_ssa (TODO_update_ssa_only_virtuals);
/* Clean up the information about numbers of iterations, since
complete unrolling might have invalidated it. */
scev_reset ();
if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA))
verify_loop_closed_ssa (true);
}
if (loop_closed_ssa_invalidated)
BITMAP_FREE (loop_closed_ssa_invalidated);
}
while (changed
&& ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
BITMAP_FREE (father_bbs);
if (irred_invalidated
&& loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
mark_irreducible_loops ();
return 0;
}
/* Canonical induction variable creation pass. */
namespace {
const pass_data pass_data_iv_canon =
{
GIMPLE_PASS, /* type */
"ivcanon", /* name */
OPTGROUP_LOOP, /* optinfo_flags */
TV_TREE_LOOP_IVCANON, /* tv_id */
( PROP_cfg | PROP_ssa ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_iv_canon : public gimple_opt_pass
{
public:
pass_iv_canon (gcc::context *ctxt)
: gimple_opt_pass (pass_data_iv_canon, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
virtual unsigned int execute (function *fun);
}; // class pass_iv_canon
unsigned int
pass_iv_canon::execute (function *fun)
{
if (number_of_loops (fun) <= 1)
return 0;
return canonicalize_induction_variables ();
}
} // anon namespace
gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
return new pass_iv_canon (ctxt);
}
/* Complete unrolling of loops. */
namespace {
const pass_data pass_data_complete_unroll =
{
GIMPLE_PASS, /* type */
"cunroll", /* name */
OPTGROUP_LOOP, /* optinfo_flags */
TV_COMPLETE_UNROLL, /* tv_id */
( PROP_cfg | PROP_ssa ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_complete_unroll : public gimple_opt_pass
{
public:
pass_complete_unroll (gcc::context *ctxt)
: gimple_opt_pass (pass_data_complete_unroll, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *);
}; // class pass_complete_unroll
unsigned int
pass_complete_unroll::execute (function *fun)
{
if (number_of_loops (fun) <= 1)
return 0;
/* If we ever decide to run loop peeling more than once, we will need to
track loops already peeled in loop structures themselves to avoid
re-peeling the same loop multiple times. */
if (flag_peel_loops)
peeled_loops = BITMAP_ALLOC (NULL);
unsigned int val = tree_unroll_loops_completely (flag_unroll_loops
|| flag_peel_loops
|| optimize >= 3, true);
if (peeled_loops)
{
BITMAP_FREE (peeled_loops);
peeled_loops = NULL;
}
return val;
}
} // anon namespace
gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
return new pass_complete_unroll (ctxt);
}
/* Complete unrolling of inner loops. */
namespace {
const pass_data pass_data_complete_unrolli =
{
GIMPLE_PASS, /* type */
"cunrolli", /* name */
OPTGROUP_LOOP, /* optinfo_flags */
TV_COMPLETE_UNROLL, /* tv_id */
( PROP_cfg | PROP_ssa ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_complete_unrolli : public gimple_opt_pass
{
public:
pass_complete_unrolli (gcc::context *ctxt)
: gimple_opt_pass (pass_data_complete_unrolli, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *) { return optimize >= 2; }
virtual unsigned int execute (function *);
}; // class pass_complete_unrolli
unsigned int
pass_complete_unrolli::execute (function *fun)
{
unsigned ret = 0;
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
if (number_of_loops (fun) > 1)
{
scev_initialize ();
ret = tree_unroll_loops_completely (optimize >= 3, false);
scev_finalize ();
}
loop_optimizer_finalize ();
return ret;
}
} // anon namespace
gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
return new pass_complete_unrolli (ctxt);
}
|
GB_binop__copysign_fp64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__copysign_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__copysign_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__copysign_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__copysign_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__copysign_fp64)
// C=scalar+B GB (_bind1st__copysign_fp64)
// C=scalar+B' GB (_bind1st_tran__copysign_fp64)
// C=A+scalar GB (_bind2nd__copysign_fp64)
// C=A'+scalar GB (_bind2nd_tran__copysign_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = copysign (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = copysign (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COPYSIGN || GxB_NO_FP64 || GxB_NO_COPYSIGN_FP64)
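// For reference, GB_BINOP (GB_CX (p), aij, bij, i, j) expands to
//
// Cx [p] = copysign (aij, bij) ;
//
// i.e. the magnitude of aij combined with the sign of bij, so for example
// copysign (2.5, -0.0) is -2.5.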
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__copysign_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__copysign_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__copysign_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__copysign_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__copysign_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = copysign (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__copysign_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = copysign (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = copysign (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__copysign_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = copysign (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__copysign_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
simp.c
|
/* Start reading here */
#include <fftw3.h>
#define NUM_POINTS 64
/* Never mind this bit */
#include <stdio.h>
#include <math.h>
#define REAL 0
#define IMAG 1
double theta;
void acquire_from_somewhere(fftw_complex* signal) {
/* Generate two sine waves of different frequencies and
* amplitudes. */
int i;
#pragma omp for private(theta)
for (i = 0; i < NUM_POINTS; ++i) {
theta = (double)i / (double)NUM_POINTS * M_PI;
signal[i][REAL] = 1.0 * cos(4.0 * theta) + 0.5 * cos( 8.0 * theta);
/* The next assignment overwrites this one, so it is commented out here
to avoid a dead store:
signal[i][IMAG] = 1.0 * sin(2.0 * theta) + 0.5 * sin(16.0 * theta); */
signal[i][IMAG] = 1.0 * cos(2.0 * theta) + 0.5 * cos(16.0 * theta);
// signal[i][REAL]=i;
// signal[i][IMAG]=0;
}
}
void do_something_with(fftw_complex* result) {
int i;
for (i = 0; i < NUM_POINTS; ++i) {
double mag = sqrt(result[i][REAL] * result[i][REAL] +
result[i][IMAG] * result[i][IMAG]);
printf("%23.12f %10.5f %10.5f\n", mag,result[i][REAL] ,result[i][IMAG]);
}
}
/* Resume reading here */
int main() {
fftw_complex signal[NUM_POINTS];
fftw_complex result[NUM_POINTS];
fftw_plan plan = fftw_plan_dft_1d(NUM_POINTS,
signal,
result,
FFTW_FORWARD,
FFTW_ESTIMATE);
acquire_from_somewhere(signal);
fftw_execute(plan);
do_something_with(result);
fftw_destroy_plan(plan);
return 0;
}
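/* To build and run this example, link against FFTW3 and the math library,
for instance (typical invocation, adjust as needed for your setup):
gcc simp.c -o simp -lfftw3 -lm
Each output line printed by do_something_with() is the magnitude, real
part and imaginary part of one of the NUM_POINTS frequency bins. */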
|
libsvm_parser.h
|
/*!
* Copyright (c) 2015 by Contributors
* \file libsvm_parser.h
* \brief iterator parser to parse libsvm format
* \author Tianqi Chen
*/
#ifndef XGBOOST_IO_LIBSVM_PARSER_H_
#define XGBOOST_IO_LIBSVM_PARSER_H_
#define NOMINMAX
#include <vector>
#include <cstring>
#include <cctype>
#include <algorithm>
#include "../utils/omp.h"
#include "../utils/utils.h"
#include "../sync/sync.h"
#include "../utils/thread_buffer.h"
#include "./sparse_batch_page.h"
namespace xgboost {
namespace io {
/*! \brief page returned by libsvm parser */
struct LibSVMPage : public SparsePage {
std::vector<float> label;
// overload clear
inline void Clear() {
SparsePage::Clear();
label.clear();
}
};
/*!
* \brief libsvm parser that parses the input lines
* and returns rows of the input data;
* factory used by the thread buffer template
*/
class LibSVMPageFactory {
public:
LibSVMPageFactory()
: bytes_read_(0), at_head_(true) {
}
inline bool Init(void) {
return true;
}
inline void Setup(dmlc::InputSplit *source,
int nthread) {
source_ = source;
int maxthread;
#pragma omp parallel
{
maxthread = omp_get_num_procs();
}
maxthread = std::max(maxthread / 2, 1);
nthread_ = std::min(maxthread, nthread);
}
inline void SetParam(const char *name, const char *val) {}
inline bool LoadNext(std::vector<LibSVMPage> *data) {
return FillData(data);
}
inline void FreeSpace(std::vector<LibSVMPage> *a) {
delete a;
}
inline std::vector<LibSVMPage> *Create(void) {
return new std::vector<LibSVMPage>();
}
inline void BeforeFirst(void) {
utils::Assert(at_head_, "cannot call beforefirst");
}
inline void Destroy(void) {
delete source_;
}
inline size_t bytes_read(void) const {
return bytes_read_;
}
protected:
inline bool FillData(std::vector<LibSVMPage> *data) {
dmlc::InputSplit::Blob chunk;
if (!source_->NextChunk(&chunk)) return false;
int nthread;
#pragma omp parallel num_threads(nthread_)
{
nthread = omp_get_num_threads();
}
// reserve space for data
data->resize(nthread);
bytes_read_ += chunk.size;
    utils::Assert(chunk.size != 0, "LibSVMParser.FillData");
char *head = reinterpret_cast<char*>(chunk.dptr);
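    // Each thread parses a disjoint byte range [pbegin, pend); BackFindEndLine
    // snaps both boundaries back to the previous end-of-line so that no record
    // straddles two threads (the last thread simply keeps the chunk tail).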
#pragma omp parallel num_threads(nthread_)
{
// threadid
int tid = omp_get_thread_num();
size_t nstep = (chunk.size + nthread - 1) / nthread;
size_t sbegin = std::min(tid * nstep, chunk.size);
size_t send = std::min((tid + 1) * nstep, chunk.size);
char *pbegin = BackFindEndLine(head + sbegin, head);
char *pend;
if (tid + 1 == nthread) {
pend = head + send;
} else {
pend = BackFindEndLine(head + send, head);
}
ParseBlock(pbegin, pend, &(*data)[tid]);
}
return true;
}
/*!
* \brief parse data into out
* \param begin beginning of buffer
* \param end end of buffer
*/
inline void ParseBlock(char *begin,
char *end,
LibSVMPage *out) {
using namespace std;
out->Clear();
char *p = begin;
while (p != end) {
while (isspace(*p) && p != end) ++p;
if (p == end) break;
char *head = p;
while (isdigit(*p) && p != end) ++p;
if (*p == ':') {
out->data.push_back(SparseBatch::Entry(atol(head),
static_cast<bst_float>(atof(p + 1))));
} else {
if (out->label.size() != 0) {
out->offset.push_back(out->data.size());
}
out->label.push_back(static_cast<float>(atof(head)));
}
while (!isspace(*p) && p != end) ++p;
}
if (out->label.size() != 0) {
out->offset.push_back(out->data.size());
}
utils::Check(out->label.size() + 1 == out->offset.size(),
"LibSVMParser inconsistent");
}
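  // Example of the format ParseBlock consumes (hypothetical line):
  //   "1 3:0.5 10:2.25\n" -> label 1 with sparse features (3,0.5) and (10,2.25);
  // a token containing ':' is parsed as an index:value pair, any other token
  // starts a new row and is read as its label.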
/*!
* \brief start from bptr, go backward and find first endof line
* \param bptr end position to go backward
* \param begin the beginning position of buffer
* \return position of first endof line going backward
*/
inline char* BackFindEndLine(char *bptr,
char *begin) {
for (; bptr != begin; --bptr) {
if (*bptr == '\n' || *bptr == '\r') return bptr;
}
return begin;
}
private:
// nthread
int nthread_;
  // number of bytes read
size_t bytes_read_;
  // whether we are still at the beginning of the stream
bool at_head_;
// source split that provides the data
dmlc::InputSplit *source_;
};
class LibSVMParser : public utils::IIterator<LibSVMPage> {
public:
explicit LibSVMParser(dmlc::InputSplit *source,
int nthread)
: at_end_(false), data_ptr_(0), data_(NULL) {
itr.SetParam("buffer_size", "2");
itr.get_factory().Setup(source, nthread);
itr.Init();
}
virtual void BeforeFirst(void) {
itr.BeforeFirst();
}
virtual bool Next(void) {
if (at_end_) return false;
while (true) {
if (data_ == NULL || data_ptr_ >= data_->size()) {
if (!itr.Next(data_)) {
at_end_ = true; return false;
} else {
data_ptr_ = 0;
}
}
while (data_ptr_ < data_->size()) {
data_ptr_ += 1;
if ((*data_)[data_ptr_ - 1].Size() != 0) {
return true;
}
}
}
return true;
}
virtual const LibSVMPage &Value(void) const {
return (*data_)[data_ptr_ - 1];
}
inline size_t bytes_read(void) const {
return itr.get_factory().bytes_read();
}
private:
bool at_end_;
size_t data_ptr_;
std::vector<LibSVMPage> *data_;
utils::ThreadBuffer<std::vector<LibSVMPage>*, LibSVMPageFactory> itr;
};
} // namespace io
} // namespace xgboost
#endif // XGBOOST_IO_LIBSVM_PARSER_H_
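// Hypothetical usage sketch (added for illustration, not part of the original
// header; assumes the dmlc-core factory InputSplit::Create(uri, part, nsplit,
// type) and a plain libsvm text file named "train.libsvm"):
//
//   dmlc::InputSplit *split =
//       dmlc::InputSplit::Create("train.libsvm", 0, 1, "text");
//   xgboost::io::LibSVMParser parser(split, /*nthread=*/4);
//   while (parser.Next()) {
//     const xgboost::io::LibSVMPage &page = parser.Value();
//     // page.label[i] is the label of row i; page.offset and page.data give
//     // the CSR-style layout of the sparse features.
//   }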
|
kncmbpush3.c
|
/* KNC C Library for Skeleton 3D Electromagnetic Vector PIC Code */
/* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include <string.h>
#include <immintrin.h>
#include "kncmbpush3.h"
/*--------------------------------------------------------------------*/
void ckncgbppush3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx, int ny,
int nz, int mx, int my, int mz, int nxv, int nyv,
int nzv, int mx1, int my1, int mxyz1,int ipbc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
OpenMP/vector version using guard cells
data read in tiles
   particles stored in a segmented array
190 flops/particle, 1 divide, 54 loads, 6 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
      .5*(q/m)*fx(x(t),y(t),z(t))*dt
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
      .5*(q/m)*fy(x(t),y(t),z(t))*dt
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
      .5*(q/m)*fz(x(t),y(t),z(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and
omz = (q/m)*bz(x(t),y(t),z(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
z(t+dt)=z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
   ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
        (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
        (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float qtmh, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm;
__m512 v_qtmh, v_dt, v_dtc, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
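/* v_perm selects every 4th lane (0,4,8,12 | 1,5,9,13 | 2,6,10,14 | 3,7,11,15):
   the 4x4 transpose pattern used below to turn four packed (x,y,z,w) field
   tuples into component-major vectors */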
v_qtmh = _mm512_set1_ps(qtmh);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \
vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \
omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1, \
v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp, \
v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy, \
v_oz,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ll = (mz < nz-loff ? mz : nz-loff) + 1;
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* time-centered kinetic energy */
/* sum1 += (acx*acx + acy*acy + acz*acz); */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* calculate cyclotron frequency */
/* omxt = qtmh*ox; */
/* omyt = qtmh*oy; */
/* omzt = qtmh*oz; */
e = _mm512_mul_ps(v_qtmh,v_ox);
f = _mm512_mul_ps(v_qtmh,v_oy);
g = _mm512_mul_ps(v_qtmh,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
/* new velocity */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* new position */
/* dx = x + vx*dtc; */
/* dy = y + vy*dtc; */
/* dz = z + vz*dtc; */
v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* vz = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new velocity */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
dz = z + vz*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
vz = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
}
/* sum2 += sum1; */
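      /* reduce the 8 double-precision partial sums held in v_sum1 and */
      /* fold in the scalar remainder contribution sum1                */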
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
}
/* normalize kinetic energy */
*ek += 0.5f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
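/*--------------------------------------------------------------------*/
/* illustrative reference, not part of the original library and not
   called by any routine in this file: a minimal scalar sketch of the
   Boris velocity rotation that the vector loops above and below apply
   to 16 particles at a time.  it assumes the interpolated electric
   field half impulse (dx,dy,dz) = qtmh*E and the magnetic field
   (ox,oy,oz) have already been gathered for one particle; on input
   vx/vy/vz hold v(t-dt/2), on output v(t+dt/2).  the name and
   signature are illustrative only; the function is static and unused,
   so some compilers may warn about it                                 */
static void boris_rotation_sketch(float qtmh, float dx, float dy,
                                  float dz, float ox, float oy,
                                  float oz, float *vx, float *vy,
                                  float *vz) {
/* local data */
   float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
   float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
/* half acceleration from the electric field */
   acx = *vx + dx;
   acy = *vy + dy;
   acz = *vz + dz;
/* cyclotron frequency times dt/2 */
   omxt = qtmh*ox;
   omyt = qtmh*oy;
   omzt = qtmh*oz;
/* rotation matrix, as in the scalar remainder loop above */
   omt = omxt*omxt + omyt*omyt + omzt*omzt;
   anorm = 2.0f/(1.0f + omt);
   omt = 0.5f*(1.0f - omt);
   rot4 = omxt*omyt;
   rot7 = omxt*omzt;
   rot8 = omyt*omzt;
   rot1 = omt + omxt*omxt;
   rot5 = omt + omyt*omyt;
   rot9 = omt + omzt*omzt;
   rot2 = omzt + rot4;
   rot4 -= omzt;
   rot3 = -omyt + rot7;
   rot7 += omyt;
   rot6 = omxt + rot8;
   rot8 -= omxt;
/* rotate and add the second electric half impulse */
   *vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
   *vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
   *vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
   return;
}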
/*--------------------------------------------------------------------*/
void ckncgbppushf3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], int ncl[], int ihole[], float qbm,
float dt, float dtc, float *ek, int idimp,
int nppmx, int nx, int ny, int nz, int mx, int my,
int mz, int nxv, int nyv, int nzv, int mx1,
int my1, int mxyz1, int ntmax, int *irc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
190 flops/particle, 1 divide, 54 loads, 6 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
   .5*(q/m)*fx(x(t),y(t),z(t))*dt
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
   .5*(q/m)*fy(x(t),y(t),z(t))*dt
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
   .5*(q/m)*fz(x(t),y(t),z(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and
omz = (q/m)*bz(x(t),y(t),z(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
z(t+dt)=z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
     (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
     (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
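/* MXV/MYV/MZV fix the size of the local field tiles sfxyz and sbxyz;
   each tile holds 4*(mx+1)*(my+1)*(mz+1) floats (see the commented-out
   declarations below), so mx, my and mz must not exceed 16 here       */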
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float qtmh, dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_qtmh, v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
sum2 = 0.0;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
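   /* v_perm picks elements 0,4,8,12, then 1,5,9,13, then 2,6,10,14,   */
   /* then 3,7,11,15: it groups like components of four 4-float field  */
   /* entries and is the first step of the 16x3 transposes below       */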
v_qtmh = _mm512_set1_ps(qtmh);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \
y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \
omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \
edgelx,edgely,edgelz,edgerx,edgery,edgerz,sum1,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy, \
v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz,v_at,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d,v_sum1,a,b,c,d,e,f,g, \
h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
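      /* decode tile index l into grid offsets: loff, moff and noff    */
      /* below become the starting z, y and x grid points of tile l    */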
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* load local fields from global array */
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
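      /* particles are processed in blocks of 16 (one 512-bit vector); */
      /* the remaining npp-nps particles use the scalar loop below     */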
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
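         /* the masked gathers below pack two particles per register:   */
         /* masks 255/65280 fill the low/high 256-bit halves of e and f,*/
         /* and the masked permute4f128 calls merge them so that, for   */
         /* each block of 4 particles, one register holds sfxyz[nn:nn+3]*/
         /* and a second holds sfxyz[nn+4:nn+7]                         */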
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
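         /* this and the following transpose convert the gathered data  */
         /* to SoA form: a, b, c become the x, y, z field components at */
         /* the particle grid points for all 16 particles (the unused   */
         /* 4th component is dropped), while p, q, r below hold the     */
         /* same components at the x+1 neighbor                         */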
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
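/* v_ox, v_oy, v_oz now hold the trilinearly interpolated magnetic */
/* field at each of the 16 particle positions */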
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* time-centered kinetic energy */
/* sum1 += (acx*acx + acy*acy + acz*acz); */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* calculate cyclotron frequency */
/* omxt = qtmh*ox; */
/* omyt = qtmh*oy; */
/* omzt = qtmh*oz; */
e = _mm512_mul_ps(v_qtmh,v_ox);
f = _mm512_mul_ps(v_qtmh,v_oy);
g = _mm512_mul_ps(v_qtmh,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
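/* v_vx, v_vy, v_vz now equal rot*ac in the rot1..rot9 form used in */
/* the scalar loop below; d holds anorm, applied in the next step */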
/* new velocity */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* new position */
/* dx = x + vx*dtc; */
/* dy = y + vy*dtc; */
/* dz = z + vz*dtc; */
v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
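/* encoding: an x exit adds 1 (left) or 2 (right), a y exit adds 3 */
/* or 6, a z exit adds 9 or 18, so mm ranges from 1 to 26, matching */
/* the 26 entries per tile in ncl */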
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new velocity */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
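/* the ncl and ihole updates use data-dependent indices, so the */
/* direction codes are stored to kk and processed in a scalar loop */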
_mm512_store_epi32(kk,v_mm);
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
dz = z + vz*dtc;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* sum2 += sum1; */
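/* reduce the 8 double-precision lanes of v_sum1 and add the */
/* scalar-loop contribution sum1 */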
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
/* set error flag and mark ihole overflow with a negative hole count */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* normalize kinetic energy */
*ek += 0.5f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrbppush3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], float qbm, float dt, float dtc,
float ci, float *ek, int idimp, int nppmx, int nx,
int ny, int nz, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1,
int ipbc) {
/* for 3d code, this subroutine updates particle co-ordinates and
momenta using a leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field,
using the Boris Mover
OpenMP/vector version using guard cells
data read in tiles
particles stored in segmented array
202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores
input: all, output: ppart, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fx(x(t),y(t),z(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fy(x(t),y(t),z(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fz(x(t),y(t),z(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot(1) = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot(2) = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot(3) = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot(4) = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot(5) = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot(6) = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot(7) = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot(8) = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot(9) = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t))*gami,
omy = (q/m)*by(x(t),y(t),z(t))*gami,
omz = (q/m)*bz(x(t),y(t),z(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
z(t+dt) = z(t) + pz(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum px of particle n in tile m
ppart[m][4][n] = momentum py of particle n in tile m
ppart[m][5][n] = momentum pz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz and bxyz need to have 4 components, although one is not used
local data */
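/* outline: each tile copies the global fxyz and bxyz fields into the */
/* local arrays sfxyz and sbxyz, then pushes its particles in blocks */
/* of 16 with KNC intrinsics, accumulating kinetic energy in doubles */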
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float qtmh, ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg;
float x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm;
__m512 v_qtmh, v_ci2, v_dt, v_dtc, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
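/* v_perm reorders 4 interleaved (x,y,z,w) quadruples into groups of */
/* like components (all x, then y, z, w); it is the first stage of */
/* each 16x3 transpose below */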
v_qtmh = _mm512_set1_ps(qtmh);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \
vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \
omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2, \
gami,qtmg,dtg,sum1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x, \
v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx, \
v_vy,v_vz,v_ox,v_oy,v_oz,v_gami,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r, \
s,msk,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
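/* decode tile index l into z, y, x tile coordinates and the */
/* corresponding grid offsets loff, moff, noff */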
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* load local fields from global array */
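/* nn, mm, ll = number of grid points to copy in each direction */
/* (interior points plus one guard point); nps = largest multiple */
/* of 4 for the vectorized copy loop */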
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ll = (mz < nz-loff ? mz : nz-loff) + 1;
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
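/* v_amx, v_amy, v_dyp, v_dx1 now hold the four bilinear x-y weights */
/* (1-dxp)*(1-dyp), dxp*(1-dyp), (1-dxp)*dyp, dxp*dyp; v_amz = 1-dzp */
/* and v_dzp weight the two z planes */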
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
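/* a gathers sfxyz[nn:nn+3] and p gathers sfxyz[nn+4:nn+7] for the */
/* first 4 particles, one 128-bit lane per particle */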
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
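/* v_ox, v_oy, v_oz now hold the trilinearly interpolated magnetic */
/* field for the 16 particles, the vector analogue of ox, oy, oz in */
/* the scalar remainder loop below */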
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* find inverse gamma */
/* p2 = acx*acx + acy*acy + acz*acz; */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* time-centered kinetic energy */
/* sum1 += gami*p2/(1.0f + gami); */
v_at = _mm512_mul_ps(v_gami,v_at);
v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami));
/* convert to double precision before accumulating */
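/* cvtpslo_pd converts the low 8 floats to double; permute4f128 with */
/* immediate 78 swaps the 256-bit halves so the high 8 floats can be */
/* converted and accumulated as well */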
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* renormalize magnetic field */
/* qtmg = qtmh*gami; */
v_at = _mm512_mul_ps(v_qtmh,v_gami);
/* calculate cyclotron frequency */
/* omxt = qtmg*ox; */
/* omyt = qtmg*oy; */
/* omzt = qtmg*oz; */
e = _mm512_mul_ps(v_at,v_ox);
f = _mm512_mul_ps(v_at,v_oy);
g = _mm512_mul_ps(v_at,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
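/* at this point e, f, g hold omxt, omyt, omzt, d holds anorm, and */
/* a, b, c hold acx, acy, acz; v_vx, v_vy, v_vz hold the rotated */
/* momenta rot1*acx+rot2*acy+rot3*acz, etc., before scaling by anorm */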
/* new momentum */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* update inverse gamma */
/* p2 = vx*vx + vy*vy + vz*vz; */
v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx));
v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at);
/* dtg = dtc/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_mul_ps(v_dtc,v_at); */
/* full accuracy calculation */
v_at = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_at = _mm512_div_ps(v_dtc,v_at);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_div_ps(v_dtc,v_at); */
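/* v_at now holds dtg = dtc/sqrtf(1.0f + p2*ci2) */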
/* new position */
/* dx = x + vx*dtg; */
/* dy = y + vy*dtg; */
/* dz = z + vz*dtg; */
v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
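/* mask_blend keeps dx where the mask is clear and restores x in the */
/* out-of-bounds lanes; mask_sub writes 0 - vx in those lanes only, */
/* reversing the velocity component */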
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* vz = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new momentum */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* update inverse gamma */
p2 = vx*vx + vy*vy + vz*vz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + vx*dtg;
dy = y + vy*dtg;
dz = z + vz*dtg;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
vz = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new momentum */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
}
/* normalize kinetic energy */
*ek += sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrbppushf3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], int ncl[], int ihole[], float qbm,
float dt, float dtc, float ci, float *ek,
int idimp, int nppmx, int nx, int ny, int nz,
int mx, int my, int mz, int nxv, int nyv, int nzv,
int mx1, int my1, int mxyz1, int ntmax,
int *irc) {
/* for 3d code, this subroutine updates particle co-ordinates and
momenta using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
Using the Boris Mover.
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data read in tiles
particles stored in segmented array
202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fx(x(t),y(t),z(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fy(x(t),y(t),z(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fz(x(t),y(t),z(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t))*gami,
omy = (q/m)*by(x(t),y(t),z(t))*gami,
omz = (q/m)*bz(x(t),y(t),z(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
z(t+dt) = z(t) + pz(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum px of particle n in tile m
ppart[m][4][n] = momentum py of particle n in tile m
ppart[m][5][n] = momentum pz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg;
float qtmh, ci2, x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_qtmh, v_ci2, v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
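/* note: the fixed-size buffers above require mx < MXV, my < MYV and */
/* mz < MZV (cf. the commented-out size check below) */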
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
sum2 = 0.0;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
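/* index pattern for the 16x3 field transposes below: result lanes */
/* 0-3 gather source lanes 0,4,8,12, lanes 4-7 gather 1,5,9,13, etc. */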
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtmh = _mm512_set1_ps(qtmh);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \
y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \
omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \
edgelx,edgely,edgelz,edgerx,edgery,edgerz,p2,gami,qtmg,dtg,sum1,v_noff, \
v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp, \
v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz, \
v_gami,v_at,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d, \
v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* load local fields from global array */
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
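/* loadunpacklo/hi read 16 consecutive floats from a possibly */
/* unaligned address in fxyz; packstorelo/hi write them to the */
/* corresponding, possibly unaligned, offset in the tile buffer */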
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
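/* the shift left by 2 supplies the factor of 4 for the 4-component */
/* field stride in the commented formula above */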
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
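/* a now holds sfxyz[nn:nn+3] for the first 4 particles and p holds */
/* sfxyz[nn+4:nn+7]; the 255/65280 masks expand each 8-float load */
/* into the low or high 256 bits, and permute4f128 with immediate */
/* 177 (which swaps adjacent 128-bit lanes) interleaves the two */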
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
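/* after the two transposes a, b, c hold the x, y, z components of */
/* sfxyz[nn:nn+2] for all 16 particles, and p, q, r the x, y, z */
/* components of sfxyz[nn+4:nn+6]; the fourth (padding) component */
/* is discarded */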
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
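/* note: the four parts above assemble the trilinear interpolation    */
/* dx = amz*(amx*fx(n,m,l) + amy*fx(n+1,m,l) + dyp*fx(n,m+1,l)        */
/*         + dx1*fx(n+1,m+1,l)) + dzp*(the same four terms at l+1),   */
/* and likewise for dy and dz, matching the scalar loop over the      */
/* remaining particles further below                                  */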
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* find inverse gamma */
/* p2 = acx*acx + acy*acy + acz*acz; */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* time-centered kinetic energy */
/* sum1 += gami*p2/(1.0f + gami); */
v_at = _mm512_mul_ps(v_gami,v_at);
v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami));
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
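/* note: v_at holds 16 single-precision partial energies; cvtpslo_pd  */
/* converts lanes 0-7 and permute4f128 with immediate 78 brings lanes */
/* 8-15 into the low half for the second conversion, so the energy    */
/* sum is accumulated in double precision                             */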
/* renormalize magnetic field */
/* qtmg = qtmh*gami; */
v_at = _mm512_mul_ps(v_qtmh,v_gami);
/* calculate cyclotron frequency */
/* omxt = qtmg*ox; */
/* omyt = qtmg*oy; */
/* omzt = qtmg*oz; */
e = _mm512_mul_ps(v_at,v_ox);
f = _mm512_mul_ps(v_at,v_oy);
g = _mm512_mul_ps(v_at,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
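/* note: the sequence above applies the Boris rotation                */
/* (rot1..rot9)*(acx,acy,acz) without forming the matrix explicitly;  */
/* e,f,g hold omxt,omyt,omzt, h is reused for the omt cross terms,    */
/* and the terms match the scalar rot1..rot9 expressions in the       */
/* remainder loop below                                               */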
/* new momentum */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* update inverse gamma */
/* p2 = vx*vx + vy*vy + vz*vz; */
v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx));
v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at);
/* dtg = dtc/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_mul_ps(v_dtc,v_at); */
/* full accuracy calculation */
v_at = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_at = _mm512_div_ps(v_dtc,v_at);
/* full accuracy calculation with SVML */
/* v_at = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_mul_ps(v_dtc,v_at); */
/* new position */
/* dx = x + vx*dtg; */
/* dy = y + vy*dtg; */
/* dz = z + vz*dtg; */
v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
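/* note: the direction code accumulated in v_mm is cx + 3*cy + 9*cz,  */
/* where cx,cy,cz are 0 (no exit), 1 (lower boundary) or 2 (upper     */
/* boundary), giving values 1 to 26 used below to index ncl           */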
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new momentum */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
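/* the ncl/ihole updates below are left scalar: they are              */
/* data-dependent indexed increments that can collide between lanes   */
/* and so cannot be expressed as a simple masked vector store         */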
_mm512_store_epi32(kk,v_mm);
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* update inverse gamma */
p2 = vx*vx + vy*vy + vz*vz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + vx*dtg;
dy = y + vy*dtg;
dz = z + vz*dtg;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new momentum */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* normalize kinetic energy */
*ek += sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
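/*--------------------------------------------------------------------*/
/* illustrative sketch added for exposition; it is not part of the
   original library and is not called anywhere.  it decodes the
   exit-direction code mm stored above in ihole[1+2*(ih+(ntmax+1)*l)].
   from the boundary tests, mm is packed as mm = cx + 3*cy + 9*cz,
   where cx, cy, cz are 0 (no exit), 1 (exit through the lower
   boundary) or 2 (exit through the upper boundary) in x, y, z.
   the helper name and signature are hypothetical. */
static void knc_decode_exit_direction_sketch(int mm, int *cx, int *cy,
                                             int *cz) {
/* cx/cy/cz = recovered per-axis exit flags, each 0, 1 or 2 */
   *cx = mm % 3;
   *cy = (mm/3) % 3;
   *cz = mm/9;
   return;
}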
/*--------------------------------------------------------------------*/
void ckncgppost3lt(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz,
int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP/vector version using guard cells
data deposited in tiles
   particles stored in a segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
__m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_one;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_as, v_at;
__m512 a, b, c, d, e, f, g, h, qp, qr;
__mmask16 msk, msks, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx + 1;
myv = my + 1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
v_mxv = _mm512_set1_epi32(mxv);
v_mxyv = _mm512_set1_epi32(mxyv);
v_qm = _mm512_set1_ps(qm);
v_one = _mm512_set1_ps(1.0f);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
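/* v_m is true for lanes 1-15 only; it is used further below to skip  */
/* the i = 0 element of sq when the vectorized interior add starts at */
/* i = 0, since that edge element is deposited separately with        */
/* omp atomic updates                                                 */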
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,a,b,c, \
d,e,f,g,h,qp,qr,msk,msks,kk,sq)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
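/* a-d now hold the four weights amx*amz, amy*amz, dyp*amz, dx1*amz   */
/* for the lower z-plane, and e-h the same four weights scaled by dzp */
/* for the upper z-plane, one weight per particle lane                */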
_mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
/* deposit charge for two particles at a time */
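/* each pass of the loop below updates particles 2*i and 2*i+1: the   */
/* masks 3<<(2*i), 2<<(2*i) and 1<<(2*i) select the lane pair, and    */
/* mask_shuffle_epi32 with pattern 177 (swap adjacent elements) pairs */
/* each particle's two weights into adjacent lanes so that sq[mm] and */
/* sq[mm+1] can be read, incremented and written back together        */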
for (i = 0; i < 8; i++) {
/* first particle */
mm = kk[2*i];
msk = _mm512_int2mask(3<<(2*i));
msks = _mm512_int2mask(2<<(2*i));
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)a,msks,
(__m512i)b,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)c,msks,
(__m512i)d,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
mm = mm + mxyv;
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)e,msks,
(__m512i)f,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)g,msks,
(__m512i)h,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
/* second particle */
mm = kk[2*i+1];
msks = _mm512_int2mask(1<<(2*i));
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)b,msks,
(__m512i)a,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)d,msks,
(__m512i)c,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
mm = mm + mxyv;
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)f,msks,
(__m512i)e,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)h,msks,
(__m512i)g,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
amz = 1.0f - dzp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amy = dxp*amy;
/* deposit charge */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
z = sq[nn+mxv] + dyp*amz;
w = sq[nn+1+mxv] + dx1*amz;
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + amx*dzp;
y = sq[mm+1] + amy*dzp;
z = sq[mm+mxv] + dyp*dzp;
w = sq[mm+1+mxv] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
/* deposit charge to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 16*(nn/16);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
for (i = 0; i < nps; i+=16) {
m = i + mxv*j + mxyv*k;
v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for the first element when i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&q[m],v_at);
_mm512_packstorehi_ps(&q[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m ; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
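/* the interior points above belong to this tile only and can be      */
/* added without synchronization; the guard-cell layers handled below */
/* overlap neighboring tiles and therefore use omp atomic updates     */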
/* deposit charge to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz,
int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP/vector version using guard cells
data deposited in tiles
   particles stored in a segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
local data */
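/* reference sketch: the per-particle scalar deposition that both the vector
   loop and the remaining-particle loop below implement, with the weights
   written out and nn = (n-noff) + mxv*(m-moff) + mxyv*(l-loff):
   sq[nn] += qm*(1.-dx)*(1.-dy)*(1.-dz);
   sq[nn+1] += qm*dx*(1.-dy)*(1.-dz);
   sq[nn+mxv] += qm*(1.-dx)*dy*(1.-dz);
   sq[nn+1+mxv] += qm*dx*dy*(1.-dz);
   sq[nn+mxyv] += qm*(1.-dx)*(1.-dy)*dz;
   sq[nn+1+mxyv] += qm*dx*(1.-dy)*dz;
   sq[nn+mxv+mxyv] += qm*(1.-dx)*dy*dz;
   sq[nn+1+mxv+mxyv] += qm*dx*dy*dz;
   sq is the local tile accumulator, added back to the global array q after
   the particle loop */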
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
__m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_one;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_as, v_at;
__mmask16 v_m;
__attribute__((aligned(64))) unsigned int kk[16];
typedef union vfloat {float v[16]; __m512 v16;} vf;
__attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
vf vv[8];
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx + 1;
myv = my + 1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
v_mxv = _mm512_set1_epi32(mxv);
v_mxyv = _mm512_set1_epi32(mxyv);
v_qm = _mm512_set1_ps(qm);
v_one = _mm512_set1_ps(1.0f);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,kk,sq,vv)
for (l = 0; l < mxyz1; l++) {
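/* decode the linear tile index l into grid offsets: */
/* loff = mz*(l/(mx1*my1)) is the z offset of the tile, */
/* moff = my*((l%(mx1*my1))/mx1) its y offset, */
/* noff = mx*((l%(mx1*my1))%mx1) its x offset; */
/* noff is reused as a temporary in the integer arithmetic below */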
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* vector loop over particles in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* x = amx*amz; */
/* y = amy*amz; */
/* z = dyp*amz; */
/* w = dx1*amz; */
vv[0].v16 = _mm512_mul_ps(v_amx,v_amz);
vv[1].v16 = _mm512_mul_ps(v_amy,v_amz);
vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz);
vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz);
vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp);
vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp);
vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp);
vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
for (i = 0; i < 16; i++) {
nn = kk[i];
x = sq[nn] + vv[0].v[i];
y = sq[nn+1] + vv[1].v[i];
z = sq[nn+mxv] + vv[2].v[i];
w = sq[nn+1+mxv] + vv[3].v[i];
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + vv[4].v[i];
y = sq[mm+1] + vv[5].v[i];
z = sq[mm+mxv] + vv[6].v[i];
w = sq[mm+1+mxv] + vv[7].v[i];
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
amz = 1.0f - dzp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amy = dxp*amy;
/* deposit charge */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
z = sq[nn+mxv] + dyp*amz;
w = sq[nn+1+mxv] + dx1*amz;
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + amx*dzp;
y = sq[mm+1] + amy*dzp;
z = sq[mm+mxv] + dyp*dzp;
w = sq[mm+1+mxv] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
/* deposit charge to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 16*(nn/16);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 16 */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
for (i = 0; i < nps; i+=16) {
m = i + mxv*j + mxyv*k;
v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&q[m],v_at);
_mm512_packstorehi_ps(&q[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m ; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
/* deposit charge to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgjppost3lt(float ppart[], float cu[], int kpic[], float qm,
float dt, int nppmx, int idimp, int nx, int ny,
int nz, int mx, int my, int mz, int nxv, int nyv,
int nzv, int mx1, int my1, int mxyz1, int ipbc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
OpenMP/vector version using guard cells
data deposited in tiles
particles stored in segmented array
69 flops/particle, 30 loads, 27 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
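/* reference sketch: per particle, each of the eight surrounding grid points
   receives all three current components; with 4 floats per grid point in
   the local accumulator scu and
   nn = 4*((n-noff) + mxv*(m-moff) + mxyv*(l-loff)), the deposit at the
   first point is
   w = qm*(1.-dx)*(1.-dy)*(1.-dz);
   scu[nn] += vx*w;
   scu[nn+1] += vy*w;
   scu[nn+2] += vz*w;
   with the other seven points at offsets nn+4, nn+4*mxv, nn+4+4*mxv,
   nn+4*mxyv, nn+4+4*mxyv, nn+4*mxv+4*mxyv, nn+4+4*mxv+4*mxyv and the
   corresponding weights (see the remaining-particle loop below) */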
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
float edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float x, y, z;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_perm;
__m512 v_qm, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
__m512 cp, cr;
__mmask16 msk, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qm = _mm512_set1_ps(qm);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y, \
z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz, \
v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_at,v_as,a,b,c,d,e,f,g,h,p,q,r, \
s,t,u,v,ws,wt,wu,wv,cp,cr,msk,kk,scu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ppart[j+3*nppmx+npoff]; */
/* vy = ppart[j+4*nppmx+npoff]; */
/* vz = ppart[j+5*nppmx+npoff]; */
v_vx = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_vy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_vz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit current for one particle at a time */
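/* each pass handles one of the 16 particles: velocities and weights are */
/* regrouped once per group of 4 particles (switch on ii), then each */
/* masked fmadd below updates two x-adjacent grid points at once, adding */
/* (vx,vy,vz) scaled by the pair of weights held in ws/wt/wu/wv (mask 119 */
/* selects the 3 current components of both 4-float grid-point slots) */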
for (i = 0; i < 16; i++) {
ii = i >> 2;
if (i==(ii<<2)) {
switch (ii)
{
case 0:
/* replicate velocities of first group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,0);
q = _mm512_permute4f128_ps(v_vy,0);
r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
break;
case 1:
/* replicate velocities of second group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,85);
q = _mm512_permute4f128_ps(v_vy,85);
r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
break;
case 2:
/* replicate velocities of third group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,170);
q = _mm512_permute4f128_ps(v_vy,170);
r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
v = _mm512_permute4f128_ps(v,78);
break;
case 3:
/* replicate velocities of fourth group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,255);
q = _mm512_permute4f128_ps(v_vy,255);
r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
v = _mm512_permute4f128_ps(v,78);
break;
}
}
v_it = _mm512_setzero_epi32();
switch (i-(ii<<2))
{
/* first particle */
case 0:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,0);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,0);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,0);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,0);
break;
/* second particle */
case 1:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,85);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,85);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,85);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,85);
break;
/* third particle */
case 2:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
_mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,170);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,170);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,170);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,170);
break;
/* fourth particle */
case 3:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,255);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,255);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,255);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,255);
break;
}
_mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
_mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
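/* the scalar conditionals are vectorized below: a compare mask flags */
/* out-of-bounds lanes, a blend restores the old position and a masked */
/* subtraction from zero negates the velocity for those lanes; the */
/* velocity store is executed only if at least one lane triggered */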
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* ppart[j+5*nppmx+npoff] = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ppart[j+3*nppmx+npoff];
vy = ppart[j+4*nppmx+npoff];
vz = ppart[j+5*nppmx+npoff];
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[j+5*nppmx+npoff] = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 4*(nn/4);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[4*(i+mxv*j+mxyv*k)]; */
/* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[1+4*(i+mxv*j+mxyv*k)]; */
/* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[2+4*(i+mxv*j+mxyv*k)]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + mxv*j + mxyv*k);
v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for the first 4 elements when i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&cu[m],v_at);
_mm512_packstorehi_ps(&cu[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgjppostf3lt(float ppart[], float cu[], int kpic[], int ncl[],
int ihole[], float qm, float dt, int nppmx,
int idimp, int nx, int ny, int nz, int mx, int my,
int mz, int nxv, int nyv, int nzv, int mx1,
int my1, int mxyz1, int ntmax, int *irc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation, with periodic boundary
conditions.
in addition, particle positions are advanced a half time-step
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data deposited in tiles
particles stored in segmented array
69 flops/particle, 30 loads, 27 stores
input: all except ncl, ihole, irc
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
optimized version
local data */
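/* note on the direction code used below: a departing particle is labelled
   mm = ix + 3*iy + 9*iz with ix,iy,iz in {0,1,2}, where 0 means it stays
   inside the tile in that direction, 1 means it leaves through the lower
   boundary and 2 through the upper boundary; the 26 nonzero values of mm
   index ncl, and mm = 0 (no exit) is not recorded in ihole */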
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ih, nh, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv;
int nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float x, y, z;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_qm, v_dt, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
__m512 cp, cr;
__mmask16 msk, msk1, msk2, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qm = _mm512_set1_ps(qm);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ii,nm,lm, \
x,y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,edgelx,edgely, \
edgelz,edgerx,edgery,edgerz,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz, \
v_vx,v_vy,v_vz,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz, \
v_at,v_as,a,b,c,d,e,f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,msk1, \
msk2,kk,scu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ppart[j+3*nppmx+npoff]; */
/* vy = ppart[j+4*nppmx+npoff]; */
/* vz = ppart[j+5*nppmx+npoff]; */
v_vx = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_vy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_vz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit current for one particle at a time */
for (i = 0; i < 16; i++) {
ii = i >> 2;
if (i==(ii<<2)) {
switch (ii)
{
case 0:
/* replicate velocities of first group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,0);
q = _mm512_permute4f128_ps(v_vy,0);
r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
break;
case 1:
/* replicate velocities of second group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,85);
q = _mm512_permute4f128_ps(v_vy,85);
r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
break;
case 2:
/* replicate velocities of third group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,170);
q = _mm512_permute4f128_ps(v_vy,170);
r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
v = _mm512_permute4f128_ps(v,78);
break;
case 3:
/* replicate velocities of fourth group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,255);
q = _mm512_permute4f128_ps(v_vy,255);
r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
v = _mm512_permute4f128_ps(v,78);
break;
}
}
v_it = _mm512_setzero_epi32();
switch (i-(ii<<2))
{
/* first particle */
case 0:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,0);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,0);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,0);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,0);
break;
/* second particle */
case 1:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,85);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,85);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,85);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,85);
break;
/* third particle */
case 2:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
_mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,170);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,170);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,170);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,170);
break;
/* fourth particle */
case 3:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,255);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,255);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,255);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,255);
break;
}
_mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
_mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(kk,v_mm);
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ppart[j+3*nppmx+npoff];
vy = ppart[j+4*nppmx+npoff];
vz = ppart[j+5*nppmx+npoff];
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 4*(nn/4);
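      /* nps = largest multiple of 4 not exceeding nn; each pass of the */
      /* vector loop below handles 4 grid points (16 floats) of scu/cu  */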
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[4*(i+mxv*j+mxyv*k)]; */
/* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[1+4*(i+mxv*j+mxyv*k)]; */
/* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[2+4*(i+mxv*j+mxyv*k)]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + mxv*j + mxyv*k);
v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&cu[m],v_at);
_mm512_packstorehi_ps(&cu[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
      /* set error flag if the hole array overflowed and record hole count */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrjppost3lt(float ppart[], float cu[], int kpic[], float qm,
float dt, float ci, int nppmx, int idimp, int nx,
int ny, int nz, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1,
int ipbc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
OpenMP/vector version using guard cells
data deposited in tiles
   particles stored in segmented array
79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum vx of particle n in tile m
ppart[m][4][n] = momentum vy of particle n in tile m
ppart[m][5][n] = momentum vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
   requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
float ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float p2, gami;
float x, y, z, ux, uy, uz;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_perm;
__m512 v_qm, v_ci2, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_ux, v_uy, v_uz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
__m512 cp, cr;
__mmask16 msk, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
ci2 = ci*ci;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qm = _mm512_set1_ps(qm);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
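   /* v_m is set for elements 4-15 only; the interior deposit loop uses */
   /* it to skip the grid point at i = 0 on its first pass              */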
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \
vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,p2,gami,v_noff, \
v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx, \
v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ux,v_uy,v_uz,v_gami, \
v_at,v_as,a,b,c,d,e,f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,kk,scu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* find inverse gamma */
/* ux = ppart[j+3*nppmx+npoff]; */
/* uy = ppart[j+4*nppmx+npoff]; */
/* uz = ppart[j+5*nppmx+npoff]; */
v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* p2 = ux*ux + uy*uy + uz*uz; */
v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux));
v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* calculate weights */
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ux*gami; */
/* vy = uy*gami; */
/* vz = uz*gami; */
v_vx = _mm512_mul_ps(v_ux,v_gami);
v_vy = _mm512_mul_ps(v_uy,v_gami);
v_vz = _mm512_mul_ps(v_uz,v_gami);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit charge for one particle at a time */
for (i = 0; i < 16; i++) {
ii = i >> 2;
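            /* ii selects the current group of 4 particles; the regrouping */
            /* switch below is executed only when a new group starts       */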
if (i==(ii<<2)) {
switch (ii)
{
case 0:
/* replicate velocities of first group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,0);
q = _mm512_permute4f128_ps(v_vy,0);
r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
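               /* mask 61680 = 0xf0f0 keeps 128-bit lanes 1 and 3 of the */
               /* permuted operand; pattern 177 = 0xb1 swaps adjacent    */
               /* 128-bit lanes                                          */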
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
break;
case 1:
/* replicate velocities of second group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,85);
q = _mm512_permute4f128_ps(v_vy,85);
r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
break;
case 2:
/* replicate velocities of third group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,170);
q = _mm512_permute4f128_ps(v_vy,170);
r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
v = _mm512_permute4f128_ps(v,78);
break;
case 3:
/* replicate velocities of fourth group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,255);
q = _mm512_permute4f128_ps(v_vy,255);
r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
v = _mm512_permute4f128_ps(v,78);
break;
}
}
v_it = _mm512_setzero_epi32();
switch (i-(ii<<2))
{
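            /* shuffle patterns 0, 85, 170 and 255 broadcast element 0, 1, */
            /* 2 or 3 of each 128-bit lane; mask 119 = 0x77 keeps the      */
            /* 0-2 components of the two 4-component grid points           */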
/* first particle */
case 0:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,0);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,0);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,0);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,0);
break;
/* second particle */
case 1:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,85);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,85);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,85);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,85);
break;
/* third particle */
case 2:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
_mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,170);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,170);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,170);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,170);
break;
/* fourth particle */
case 3:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,255);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,255);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,255);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,255);
break;
}
_mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
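            /* the loadunpack/packstore pairs below access 8 unaligned     */
            /* floats (two 4-component grid points); mask 255 = 0xff       */
            /* limits the access and mask 119 = 0x77 updates only the      */
            /* 0-2 components of each point                                */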
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
_mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -ux; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -uy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* ppart[j+5*nppmx+npoff] = -uz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_uz = _mm512_mask_sub_ps(v_uz,msk,v_zero,v_uz);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_uz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -ux; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -uy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
/* find inverse gamma */
ux = ppart[j+3*nppmx+npoff];
uy = ppart[j+4*nppmx+npoff];
uz = ppart[j+5*nppmx+npoff];
p2 = ux*ux + uy*uy + uz*uz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ux*gami;
vy = uy*gami;
vz = uz*gami;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -uy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[j+5*nppmx+npoff] = -uz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -uy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 4*(nn/4);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[4*(i+mxv*j+mxyv*k)]; */
/* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[1+4*(i+mxv*j+mxyv*k)]; */
/* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[2+4*(i+mxv*j+mxyv*k)]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + mxv*j + mxyv*k);
v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&cu[m],v_at);
_mm512_packstorehi_ps(&cu[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrjppostf3lt(float ppart[], float cu[], int kpic[], int ncl[],
int ihole[], float qm, float dt, float ci,
int nppmx, int idimp, int nx, int ny, int nz,
int mx, int my, int mz, int nxv, int nyv, int nzv,
int mx1, int my1, int mxyz1, int ntmax,
int *irc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation with periodic boundary
conditions for relativistic particles.
in addition, particle positions are advanced a half time-step
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data deposited in tiles
   particles stored in segmented array
79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores
input: all except ncl, ihole, irc
   output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum vx of particle n in tile m
ppart[m][4][n] = momentum vy of particle n in tile m
ppart[m][5][n] = momentum vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
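   for example, a particle crossing the tile boundary at x = edgerx and
   at y = edgely is tagged with destination mm = 2 + 3 = 5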
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ih, nh, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv;
int nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float ci2, p2, gami;
float x, y, z, ux, uy, uz;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_qm, v_ci2, v_dt, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_ux, v_uy, v_uz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
__m512 cp, cr;
__mmask16 msk, msk1, msk2, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
ci2 = ci*ci;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qm = _mm512_set1_ps(qm);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
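   /* v_m is set for elements 4-15 only; the interior deposit loop uses */
   /* it to skip the grid point at i = 0 on its first pass              */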
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,ih,nh, \
x,y,z,vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,edgelx, \
edgely,edgelz,edgerx,edgery,edgerz,p2,gami,v_noff,v_moff,v_loff,v_nn, \
v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1, \
v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ux,v_uy,v_uz,v_edgelx,v_edgely, \
v_edgelz,v_edgerx,v_edgery,v_edgerz,v_gami,v_at,v_as,a,b,c,d,e,f,g,h, \
p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,msk1,msk2,kk,scu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
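      /* edgelx:edgerx, edgely:edgery, edgelz:edgerz are the tile       */
      /* boundaries in global coordinates; particles crossing them are  */
      /* tagged in ncl and ihole below                                  */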
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* find inverse gamma */
/* ux = ppart[j+3*nppmx+npoff]; */
/* uy = ppart[j+4*nppmx+npoff]; */
/* uz = ppart[j+5*nppmx+npoff]; */
v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* p2 = ux*ux + uy*uy + uz*uz; */
v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux));
v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* calculate weights */
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ux*gami; */
/* vy = uy*gami; */
/* vz = uz*gami; */
v_vx = _mm512_mul_ps(v_ux,v_gami);
v_vy = _mm512_mul_ps(v_uy,v_gami);
v_vz = _mm512_mul_ps(v_uz,v_gami);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
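         /* v_nn holds each particle's base offset into the lower z-plane */
         /* of its 8 deposit points; v_ll is the same offset one grid     */
         /* step up in z                                                  */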
/* deposit charge for one particle at a time */
for (i = 0; i < 16; i++) {
ii = i >> 2;
if (i==(ii<<2)) {
switch (ii)
{
case 0:
/* replicate velocities of first group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,0);
q = _mm512_permute4f128_ps(v_vy,0);
r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
break;
case 1:
/* replicate velocities of second group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,85);
q = _mm512_permute4f128_ps(v_vy,85);
r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
break;
case 2:
/* replicate velocities of third group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,170);
q = _mm512_permute4f128_ps(v_vy,170);
r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
v = _mm512_permute4f128_ps(v,78);
break;
case 3:
/* replicate velocities of fourth group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,255);
q = _mm512_permute4f128_ps(v_vy,255);
r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
v = _mm512_permute4f128_ps(v,78);
break;
}
}
v_it = _mm512_setzero_epi32();
switch (i-(ii<<2))
{
/* first particle */
case 0:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,0);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,0);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,0);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,0);
break;
/* second particle */
case 1:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,85);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,85);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,85);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,85);
break;
/* third particle */
case 2:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
_mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,170);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,170);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,170);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,170);
break;
/* fourth particle */
case 3:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,255);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,255);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,255);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,255);
break;
}
_mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
_mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
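         /* v_mm accumulates the direction code per particle: 1 or 2 in x, */
         /* plus 3 or 6 in y, plus 9 or 18 in z, as in the scalar loop     */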
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(kk,v_mm);
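         /* scan the 16 direction codes just stored in kk and update the */
         /* ncl and ihole bookkeeping serially                           */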
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
/* find inverse gamma */
ux = ppart[j+3*nppmx+npoff];
uy = ppart[j+4*nppmx+npoff];
uz = ppart[j+5*nppmx+npoff];
p2 = ux*ux + uy*uy + uz*uz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ux*gami;
vy = uy*gami;
vz = uz*gami;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 4*(nn/4);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[4*(i+mxv*j+mxyv*k)]; */
/* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[1+4*(i+mxv*j+mxyv*k)]; */
/* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[2+4*(i+mxv*j+mxyv*k)]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + mxv*j + mxyv*k);
v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&cu[m],v_at);
_mm512_packstorehi_ps(&cu[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void cknc2gjppost3lt(float ppart[], float cu[], int kpic[], float qm,
float dt, int nppmx, int idimp, int nx, int ny,
int nz, int mx, int my, int mz, int nxv, int nyv,
int nzv, int mx1, int my1, int mxyz1, int ipbc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
OpenMP/vector version using guard cells
data deposited in tiles
   particles stored in segmented array
69 flops/particle, 30 loads, 27 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*vi, where i = x,y,z
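   (an illustrative scalar sketch of this deposit appears after this
   routine)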
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
   requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
float edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float x, y, z;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_as, v_dx, v_dy, v_dz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 cp;
__mmask16 msk, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
typedef union vfloat {float v[16]; __m512 v16;} vf;
__attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
vf vv[8], vc[3], vu;
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_qm = _mm512_set1_ps(qm);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
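/* v_m selects lanes 4:15, so the first grid point (4 field components)
   can be skipped when scu is added to cu in the interior deposit loop */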
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y, \
z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz, \
v_dx1,v_dx,v_dy,v_dz,v_at,v_as,cp,msk,kk,scu,vv,vc,vu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
vv[0].v16 = _mm512_mul_ps(v_amx,v_amz);
vv[1].v16 = _mm512_mul_ps(v_amy,v_amz);
vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz);
vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp);
vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp);
vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp);
vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit current */
/* vx = ppart[j+3*nppmx+npoff]; */
/* vy = ppart[j+4*nppmx+npoff]; */
/* vz = ppart[j+5*nppmx+npoff]; */
vc[0].v16 = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
vc[1].v16 = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
vc[2].v16 = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
      /* deposit current for one particle at a time */
for (i = 0; i < 16; i++) {
nn = kk[i];
vu.v16 = _mm512_setzero_ps();
vu.v[0] = vc[0].v[i];
vu.v[1] = vc[1].v[i];
vu.v[2] = vc[2].v[i];
v_at = _mm512_permute4f128_ps(vu.v16,0);
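            /* mask 255 selects the 8 floats scu[nn:nn+7], that is, */
            /* two 4-component grid points; mask 119 = 0x77 limits the */
            /* fused multiply-add to components 0:2 of each point, so */
            /* the unused 4th component is left untouched */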
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
vu.v[0] = vv[0].v[i];
vu.v[4] = vv[1].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[nn]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[nn+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255),
cp);
mm = nn + 4*mxv;
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* dx = amx*dzp; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
vu.v[0] = vv[2].v[i];
vu.v[4] = vv[3].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),
cp);
nn += 4*mxyv;
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*dzp; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
vu.v[0] = vv[4].v[i];
vu.v[4] = vv[5].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[nn]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[nn+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255),
cp);
mm = nn + 4*mxv;
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
vu.v[0] = vv[6].v[i];
vu.v[4] = vv[7].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),
cp);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(vc[0].v16,v_dt,v_x);
v_dy = _mm512_fmadd_ps(vc[1].v16,v_dt,v_y);
v_dz = _mm512_fmadd_ps(vc[2].v16,v_dt,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
vc[0].v16 = _mm512_mask_sub_ps(vc[0].v16,msk,v_zero,vc[0].v16);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],vc[0].v16);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
vc[1].v16 = _mm512_mask_sub_ps(vc[1].v16,msk,v_zero,vc[1].v16);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],vc[1].v16);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* ppart[j+5*nppmx+npoff] = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
vc[2].v16 = _mm512_mask_sub_ps(vc[2].v16,msk,v_zero,vc[2].v16);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+5*nppmx+npoff],vc[2].v16);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
vc[0].v16 = _mm512_mask_sub_ps(vc[0].v16,msk,v_zero,vc[0].v16);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],vc[0].v16);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
vc[1].v16 = _mm512_mask_sub_ps(vc[1].v16,msk,v_zero,vc[1].v16);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],vc[1].v16);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ppart[j+3*nppmx+npoff];
vy = ppart[j+4*nppmx+npoff];
vz = ppart[j+5*nppmx+npoff];
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[j+5*nppmx+npoff] = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 4*(nn/4);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[4*(i+mxv*j+mxyv*k)]; */
/* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[1+4*(i+mxv*j+mxyv*k)]; */
/* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[2+4*(i+mxv*j+mxyv*k)]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + mxv*j + mxyv*k);
v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&cu[m],v_at);
_mm512_packstorehi_ps(&cu[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
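/*--------------------------------------------------------------------*/
/* illustrative scalar sketch, not part of the library above: this
   hypothetical helper deposits the current of one particle with the
   same trilinear weights used by cknc2gjppost3lt, following
   cu(i,n,m,l) = qci*(1-dx)*(1-dy)*(1-dz), etc., with qci = qm*vi.
   it assumes a 4-component current array cu laid out as
   cu[i+4*(n+nxv*m+nxyv*l)], like the scalar remainder loop above,
   and ignores tiling and guard cells                                 */
static void scalar_gjdeposit3_sketch(float cu[], float x, float y,
                                     float z, float vx, float vy,
                                     float vz, float qm, int nxv,
                                     int nyv) {
   int n, m, l, i, j, k, nxyv, nn;
   float dx, dy, dz, w;
   nxyv = nxv*nyv;
/* leftmost grid point and fractional offsets */
   n = x;
   m = y;
   l = z;
   dx = x - (float) n;
   dy = y - (float) m;
   dz = z - (float) l;
/* loop over the 8 surrounding grid points */
   for (k = 0; k < 2; k++) {
      for (j = 0; j < 2; j++) {
         for (i = 0; i < 2; i++) {
/* weight = qm times the product of (dx or 1-dx) type factors */
            w = qm*(i ? dx : 1.0f - dx)*(j ? dy : 1.0f - dy)
                  *(k ? dz : 1.0f - dz);
            nn = 4*((n + i) + nxv*(m + j) + nxyv*(l + k));
            cu[nn] += vx*w;
            cu[nn+1] += vy*w;
            cu[nn+2] += vz*w;
         }
      }
   }
}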
/*--------------------------------------------------------------------*/
void cknc2grjppost3lt(float ppart[], float cu[], int kpic[], float qm,
float dt, float ci, int nppmx, int idimp, int nx,
int ny, int nz, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1,
int ipbc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
OpenMP/vector version using guard cells
data deposited in tiles
   particles stored in segmented array
79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
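   (an illustrative scalar sketch of this momentum-to-velocity
   conversion appears after this routine)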
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum vx of particle n in tile m
ppart[m][4][n] = momentum vy of particle n in tile m
ppart[m][5][n] = momentum vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
   requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
float ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float p2, gami;
float x, y, z, ux, uy, uz;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_ci2, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_as, v_dx, v_dy, v_dz, v_ux, v_uy, v_uz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 cp;
__mmask16 msk, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
typedef union vfloat {float v[16]; __m512 v16;} vf;
__attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
vf vv[8], vc[3], vu;
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
ci2 = ci*ci;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_qm = _mm512_set1_ps(qm);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \
vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,p2,gami,v_noff, \
v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx, \
v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_ux,v_uy,v_uz,v_gami,v_at,v_as,cp, \
msk,kk,scu,vv,vc,vu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* find inverse gamma */
/* ux = ppart[j+3*nppmx+npoff]; */
/* uy = ppart[j+4*nppmx+npoff]; */
/* uz = ppart[j+5*nppmx+npoff]; */
v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* p2 = ux*ux + uy*uy + uz*uz; */
v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux));
v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* calculate weights */
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
vv[0].v16 = _mm512_mul_ps(v_amx,v_amz);
vv[1].v16 = _mm512_mul_ps(v_amy,v_amz);
vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz);
vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp);
vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp);
vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp);
vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit current */
/* vx = ux*gami; */
/* vy = uy*gami; */
/* vz = uz*gami; */
vc[0].v16 = _mm512_mul_ps(v_ux,v_gami);
vc[1].v16 = _mm512_mul_ps(v_uy,v_gami);
vc[2].v16 = _mm512_mul_ps(v_uz,v_gami);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
      /* deposit current for one particle at a time */
for (i = 0; i < 16; i++) {
nn = kk[i];
vu.v16 = _mm512_setzero_ps();
vu.v[0] = vc[0].v[i];
vu.v[1] = vc[1].v[i];
vu.v[2] = vc[2].v[i];
v_at = _mm512_permute4f128_ps(vu.v16,0);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
vu.v[0] = vv[0].v[i];
vu.v[4] = vv[1].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[nn]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[nn+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255),
cp);
mm = nn + 4*mxv;
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* dx = amx*dzp; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
vu.v[0] = vv[2].v[i];
vu.v[4] = vv[3].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),
cp);
nn += 4*mxyv;
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*dzp; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
vu.v[0] = vv[4].v[i];
vu.v[4] = vv[5].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[nn]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[nn+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255),
cp);
mm = nn + 4*mxv;
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
vu.v[0] = vv[6].v[i];
vu.v[4] = vv[7].v[i];
v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0);
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),
cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),
cp);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(vc[0].v16,v_dt,v_x);
v_dy = _mm512_fmadd_ps(vc[1].v16,v_dt,v_y);
v_dz = _mm512_fmadd_ps(vc[2].v16,v_dt,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -ux; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -uy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* ppart[j+5*nppmx+npoff] = -uz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_uz = _mm512_mask_sub_ps(v_uz,msk,v_zero,v_uz);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_uz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -ux; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -uy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
/* find inverse gamma */
ux = ppart[j+3*nppmx+npoff];
uy = ppart[j+4*nppmx+npoff];
uz = ppart[j+5*nppmx+npoff];
p2 = ux*ux + uy*uy + uz*uz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ux*gami;
vy = uy*gami;
vz = uz*gami;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -uy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[j+5*nppmx+npoff] = -uz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -uy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 4*(nn/4);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[4*(i+mxv*j+mxyv*k)]; */
/* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[1+4*(i+mxv*j+mxyv*k)]; */
/* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/* += scu[2+4*(i+mxv*j+mxyv*k)]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + mxv*j + mxyv*k);
v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&cu[m],v_at);
_mm512_packstorehi_ps(&cu[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
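/*--------------------------------------------------------------------*/
/* illustrative scalar sketch, not part of the library above: this
   hypothetical helper shows how cknc2grjppost3lt converts particle
   momentum to velocity before depositing current, using
   gami = 1/sqrt(1 + (ux*ux+uy*uy+uz*uz)*ci*ci) and vi = ui*gami.
   it assumes <math.h> is included at the top of this file, as the
   scalar loops above already call sqrtf                              */
static void scalar_rgamma_sketch(float ux, float uy, float uz, float ci,
                                 float *vx, float *vy, float *vz) {
   float p2, gami;
/* p2 = squared momentum, ci = reciprocal of velocity of light */
   p2 = ux*ux + uy*uy + uz*uz;
   gami = 1.0f/sqrtf(1.0f + p2*ci*ci);
/* drift velocities used in the deposit, so that qci = qm*ui*gami */
   *vx = ux*gami;
   *vy = uy*gami;
   *vz = uz*gami;
}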
/*--------------------------------------------------------------------*/
void ckncpporder3lt(float ppart[], float ppbuff[], int kpic[],
int ncl[], int ihole[], int idimp, int nppmx,
int nx, int ny, int nz, int mx, int my, int mz,
int mx1, int my1, int mz1, int npbmx, int ntmax,
int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 3D linear memory
   algorithm has 3 steps. first, one finds particles leaving the tile and
   stores the count for each direction in ncl, and the location and
   destination of each departing particle in ihole. second, a prefix scan
   of ncl is performed and departing
particles are buffered in ppbuff in direction order. finally, we copy
the incoming particles from other tiles into ppart.
input: all except ppbuff, ncl, ihole, irc
output: ppart, ppbuff, kpic, ncl, ihole, irc
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppbuff[m][i][n] = i co-ordinate of particle n in tile m
kpic[m] = number of particles in tile m
ncl[m][i] = number of particles going to destination i, tile m
ihole[m][:][0] = location of hole in array left by departing particle
ihole[m][:][1] = direction destination of particle leaving hole
all for tile m
ihole[m][0][0] = ih, number of holes left (error, if negative)
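   for example (illustration only): a particle leaving through the
   upper x face and the lower z face of its tile has destination
   ist = 2 + 9 = 11, so ncl[m][10] is incremented and the hole
   location is stored in ihole with direction 11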
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart, ppbuff need to be 64 byte aligned
nppmx, npbmx need to be a multiple of 16
local data */
int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff;
int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dx, dy, dz;
int ks[26];
__m512i v_ist, v_it, v_0, v_1, v_3, v_9;
__m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
__m512 v_anx, v_any, v_anz;
__m512 v_dx, v_dy, v_dz, v_x;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 v_zero;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int ls[32], lm[32];
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_zero = _mm512_setzero_ps();
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \
dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \
msk2,ls)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
ih = 0;
nh = 0;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
noff = (ntmax+1)*l;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* dx = ppart[j+npoff]; */
/* dy = ppart[j+nppmx+npoff]; */
/* dz = ppart[j+2*nppmx+npoff]; */
v_dx = _mm512_load_ps(&ppart[j+npoff]);
v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* find particles going out of bounds */
/* ist = 0; */
v_ist = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* ist = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
_mm512_store_ps(&ppart[j+npoff],v_x);
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* ist = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* ist = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_ist = _mm512_add_epi32(v_ist,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
_mm512_store_ps(&ppart[j+npoff],v_x);
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* ist += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
_mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* ist += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* ist += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_ist = _mm512_add_epi32(v_ist,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
_mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* ist += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* ist += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* ist += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_ist = _mm512_add_epi32(v_ist,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
}
}
/* increment counters */
/* if (ist > 0) { */
/* ncl[ist+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = ist; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(ls,v_ist);
for (i = 0; i < 16; i++) {
ist = ls[i];
if (ist > 0) {
ncl[ist+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+noff)] = j + i + 1;
ihole[1+2*(ih+noff)] = ist;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles in tile */
for (j = nps; j < npp; j++) {
dx = ppart[j+npoff];
dy = ppart[j+nppmx+npoff];
dz = ppart[j+2*nppmx+npoff];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
ppart[j+npoff] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0;
ppart[j+npoff] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[j+nppmx+npoff] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0;
ppart[j+nppmx+npoff] = dy;
}
else {
ist += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
ppart[j+2*nppmx+npoff] = dz - anz;
ist += 18;
}
else if (dz < edgelz) {
if (dz < 0.0) {
dz += anz;
if (dz < anz)
ist += 9;
else
dz = 0.0;
ppart[j+2*nppmx+npoff] = dz;
}
else {
ist += 9;
}
}
if (ist > 0) {
ncl[ist+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+noff)] = j + 1;
ihole[1+2*(ih+noff)] = ist;
}
else {
nh = 1;
}
}
}
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*noff] = ih;
}
/* ihole overflow */
if (*irc > 0)
return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
msk1 = _mm512_int2mask(1023);
v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
for (l = 0; l < mxyz1; l++) {
npoff = idimp*nppmx*l;
nboff = idimp*npbmx*l;
noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0; */
/* for (j = 0; j < 26; j++) { */
/* ist = ncl[j+26*l]; */
/* ncl[j+26*l] = isum; */
/* isum += ist; */
/* } */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
mm = 26*l;
v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
_mm512_store_epi32(ls,v_it);
v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
_mm512_store_epi32(&ls[16],v_is);
v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
for (j = 0; j < 32; j+=16) {
/* load data */
v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
v_is = _mm512_shuffle_epi32(v_it0,177);
v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
v_it0,v_is);
/* second pass */
v_is = _mm512_shuffle_epi32(v_it,80);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
v_is);
/* third pass */
v_is = _mm512_permutevar_epi32(v_m1,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
v_is);
/* fourth pass */
v_is = _mm512_permutevar_epi32(v_m2,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
v_is);
/* add offset */
v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset */
if (j==0) {
v_ioff = _mm512_shuffle_epi32(v_it,255);
v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
}
/* subtract for exclusive scan */
v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
_mm512_store_epi32(&ls[j],v_it);
}
nh = ihole[2*noff];
nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
ip = 0;
/* loop over particles leaving tile in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */
mm = 2*(j+1+noff);
v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
_mm512_store_epi32(lm,v_it);
mm += 16;
v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
_mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
for (ll = 0; ll < 16; ll++) {
j1 = lm[2*ll] - 1;
ist = lm[1+2*ll];
ii = ls[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ls[ist-1] = ii + 1;
}
}
/* loop over remaining particles leaving tile */
for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+noff)] - 1;
ist = ihole[1+2*(j+1+noff)];
ii = ls[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ls[ist-1] = ii + 1;
}
/* store 26 data elements into ncl */
mm = 26*l;
v_it = _mm512_load_epi32(ls);
v_is = _mm512_load_epi32(&ls[16]);
_mm512_packstorelo_epi32(&ncl[mm],v_it);
_mm512_packstorehi_epi32(&ncl[mm+16],v_it);
_mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
_mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
if (ip > 0)
*irc = ncl[25+26*l];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
for (l = 0; l < mxyz1; l++) {
npp = kpic[l];
npoff = idimp*nppmx*l;
noff = (ntmax+1)*l;
v_m2 = _mm512_set1_epi32(noff+1);
v_m3 = _mm512_set1_epi32(npoff);
kz = l/mxy1;
k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
lk = kz*mxy1;
/* find tile behind */
ll = kz - 1;
if (ll < 0)
ll += mz1;
ll = ll*mxy1;
/* find tile in front */
lr = kz + 1;
if (lr >= mz1)
lr -= mz1;
lr = lr*mxy1;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1 ;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk + lk;
ks[1] = kxl + kk + lk;
ks[2] = kx + kr + lk;
ks[3] = kxr + kr + lk;
ks[4] = kxl + kr + lk;
ks[5] = kx + kl + lk;
ks[6] = kxr + kl + lk;
ks[7] = kxl + kl + lk;
ks[8] = kx + kk + lr;
ks[9] = kxr + kk + lr;
ks[10] = kxl + kk + lr;
ks[11] = kx + kr + lr;
ks[12] = kxr + kr + lr;
ks[13] = kxl + kr + lr;
ks[14] = kx + kl + lr;
ks[15] = kxr + kl + lr;
ks[16] = kxl + kl + lr;
ks[17] = kx + kk + ll;
ks[18] = kxr + kk + ll;
ks[19] = kxl + kk + ll;
ks[20] = kx + kr + ll;
ks[21] = kxr + kr + ll;
ks[22] = kxl + kr + ll;
ks[23] = kx + kl + ll;
ks[24] = kxr + kl + ll;
ks[25] = kxl + kl + ll;
/* loop over directions */
nh = ihole[2*noff];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
v_it0 = _mm512_set1_epi32(nh);
v_is = _mm512_add_epi32(v_m2,v_it0);
v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
v_npp = _mm512_set1_epi32(npp);
for (ii = 0; ii < 26; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1; */
/* if (ih <= nh) { */
/* j1 = ihole[2*(ih+noff)] - 1; */
/* } */
/* place overflow at end of array */
/* else { */
/* j1 = npp; */
/* npp += 1; */
/* } */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
v_npp);
v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
(int *)ihole,4);
v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
ih += 16;
nn = ih - nh;
if (nn > 0) {
nn = nn < 16 ? nn : 16;
npp += nn;
}
msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
ll = _mm512_mask2int(_mm512_knot(msk1));
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx) */
/* ppart[j1+nppmx*i+npoff] */
/* = ppbuff[j+ncoff+npbmx*i+nboff]; */
mm = j + ncoff + npbmx*i + nboff;
v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
if (ll==0) {
_mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
}
else {
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
v_x,4);
}
v_it = _mm512_add_epi32(v_it,v_m1);
}
if (ll != 0) {
ist = 1;
}
}
/* loop over remaining particles in this direction */
for (j = nps; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
ii = nh;
nn = ihole[2*(ii+noff)] - 1;
v_it0 = _mm512_set1_epi32(nn);
ih += 1;
j2 = ihole[2*(ih+noff)] - 1;
v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
v_mm = _mm512_add_epi32(v_mm,v_m2);
v_mm = _mm512_add_epi32(v_mm,v_mm);
v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1; */
/* if (j1==nn) { */
/* ii -= 1; */
/* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* } */
kk = 0;
for (ll = 0; ll < 16; ll++) {
j1 = npp - j - ll - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
}
else {
ls[kk] = j1;
kk += 1;
}
}
v_it = _mm512_load_epi32(ls);
v_it0 = _mm512_set1_epi32(kk);
msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
v_is = _mm512_add_epi32(v_is,v_m3);
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff] */
/* = ppart[j1+nppmx*i+npoff]; */
if (kk==16) {
v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
_mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
}
else {
v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
(float *)ppart,4);
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
v_x,4);
}
v_is = _mm512_add_epi32(v_is,v_m1);
v_it = _mm512_add_epi32(v_it,v_m1);
}
ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
}
/* loop over remaining particles */
if (nps < ip) {
nn = ihole[2*(ii+noff)] - 1;
j2 = ihole[2*(ih+noff)] - 1;
}
for (j = nps; j < ip; j++) {
j1 = npp - j - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+noff)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
}
npp -= ip;
}
kpic[l] = npp;
}
return;
}
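/*--------------------------------------------------------------------*/
/* illustrative sketch, not called anywhere in this file: the masked
   shuffle/permute passes used to scan ncl above implement a 16-lane
   exclusive prefix sum.  the plain-C equivalent below performs the
   same four passes (strides 1,2,4,8, matching the masks 43690, 52428,
   61680 and 65280) and then subtracts the input to turn the inclusive
   scan into an exclusive one.  the helper name is hypothetical.      */
static void sketch_exclusive_scan16(const int in[16], int out[16]) {
/* local data */
   int i, stride;
   int tmp[16];
   for (i = 0; i < 16; i++) {
      tmp[i] = in[i];
   }
/* each pass adds, to every lane whose index has the stride bit set, */
/* the last lane of the preceding block of size stride               */
   for (stride = 1; stride < 16; stride <<= 1) {
      for (i = 0; i < 16; i++) {
         if (i & stride)
            tmp[i] += tmp[(i & ~(stride - 1)) - 1];
      }
   }
/* subtracting the original value makes the scan exclusive */
   for (i = 0; i < 16; i++) {
      out[i] = tmp[i] - in[i];
   }
   return;
}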
/*--------------------------------------------------------------------*/
void ckncpporderf3lt(float ppart[], float ppbuff[], int kpic[],
int ncl[], int ihole[], int idimp, int nppmx,
int mx1, int my1, int mz1, int npbmx, int ntmax,
int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 3D linear memory
the algorithm has 2 steps. first, a prefix scan of ncl is performed
and departing particles are buffered in ppbuff in direction order.
then we copy the incoming particles from other tiles into ppart.
it assumes that the number, location, and destination of particles
leaving a tile have been previously stored in ncl and ihole by the
ckncgppushf3lt subroutine.
input: all except ppbuff, irc
output: ppart, ppbuff, kpic, ncl, irc
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppbuff[m][i][n] = i co-ordinate of particle n in tile m
kpic[m] = number of particles in tile m
ncl[m][i] = number of particles going to destination i, tile m
ihole[m][:][0] = location of hole in array left by departing particle
ihole[m][:][1] = direction destination of particle leaving hole
all for tile m
ihole[m][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart, ppbuff need to be 64 byte aligned
nppmx, npbmx need to be a multiple of 16
local data */
int mxy1, mxyz1, noff, npp, npoff, nps, nboff, ncoff;
int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
int ks[26];
__m512i v_it, v_0, v_1;
__m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
__m512 v_x, v_zero;
__mmask16 msk1;
__attribute__((aligned(64))) unsigned int ls[32], lm[32];
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_zero = _mm512_setzero_ps();
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
msk1 = _mm512_int2mask(1023);
v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
for (l = 0; l < mxyz1; l++) {
npoff = idimp*nppmx*l;
nboff = idimp*npbmx*l;
noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0; */
/* for (j = 0; j < 26; j++) { */
/* ist = ncl[j+26*l]; */
/* ncl[j+26*l] = isum; */
/* isum += ist; */
/* } */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
mm = 26*l;
v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
_mm512_store_epi32(ls,v_it);
v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
_mm512_store_epi32(&ls[16],v_is);
v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
for (j = 0; j < 32; j+=16) {
/* load data */
v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
v_is = _mm512_shuffle_epi32(v_it0,177);
v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
v_it0,v_is);
/* second pass */
v_is = _mm512_shuffle_epi32(v_it,80);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
v_is);
/* third pass */
v_is = _mm512_permutevar_epi32(v_m1,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
v_is);
/* fourth pass */
v_is = _mm512_permutevar_epi32(v_m2,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
v_is);
/* add offset */
v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset */
if (j==0) {
v_ioff = _mm512_shuffle_epi32(v_it,255);
v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
}
/* subtract for exclusive scan */
v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
_mm512_store_epi32(&ls[j],v_it);
}
nh = ihole[2*noff];
nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
ip = 0;
/* loop over particles leaving tile in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */
mm = 2*(j+1+noff);
v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
_mm512_store_epi32(lm,v_it);
mm += 16;
v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
_mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
for (ll = 0; ll < 16; ll++) {
j1 = lm[2*ll] - 1;
ist = lm[1+2*ll];
ii = ls[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ls[ist-1] = ii + 1;
}
}
/* loop over remaining particles leaving tile */
for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+noff)] - 1;
ist = ihole[1+2*(j+1+noff)];
ii = ls[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ls[ist-1] = ii + 1;
}
/* store 26 data elements into ncl */
mm = 26*l;
v_it = _mm512_load_epi32(ls);
v_is = _mm512_load_epi32(&ls[16]);
_mm512_packstorelo_epi32(&ncl[mm],v_it);
_mm512_packstorehi_epi32(&ncl[mm+16],v_it);
_mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
_mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
if (ip > 0)
*irc = ncl[25+26*l];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
for (l = 0; l < mxyz1; l++) {
npp = kpic[l];
npoff = idimp*nppmx*l;
noff = (ntmax+1)*l;
v_m2 = _mm512_set1_epi32(noff+1);
v_m3 = _mm512_set1_epi32(npoff);
kz = l/mxy1;
k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
lk = kz*mxy1;
/* find tile behind */
ll = kz - 1;
if (ll < 0)
ll += mz1;
ll = ll*mxy1;
/* find tile in front */
lr = kz + 1;
if (lr >= mz1)
lr -= mz1;
lr = lr*mxy1;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1 ;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk + lk;
ks[1] = kxl + kk + lk;
ks[2] = kx + kr + lk;
ks[3] = kxr + kr + lk;
ks[4] = kxl + kr + lk;
ks[5] = kx + kl + lk;
ks[6] = kxr + kl + lk;
ks[7] = kxl + kl + lk;
ks[8] = kx + kk + lr;
ks[9] = kxr + kk + lr;
ks[10] = kxl + kk + lr;
ks[11] = kx + kr + lr;
ks[12] = kxr + kr + lr;
ks[13] = kxl + kr + lr;
ks[14] = kx + kl + lr;
ks[15] = kxr + kl + lr;
ks[16] = kxl + kl + lr;
ks[17] = kx + kk + ll;
ks[18] = kxr + kk + ll;
ks[19] = kxl + kk + ll;
ks[20] = kx + kr + ll;
ks[21] = kxr + kr + ll;
ks[22] = kxl + kr + ll;
ks[23] = kx + kl + ll;
ks[24] = kxr + kl + ll;
ks[25] = kxl + kl + ll;
/* loop over directions */
nh = ihole[2*noff];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
v_it0 = _mm512_set1_epi32(nh);
v_is = _mm512_add_epi32(v_m2,v_it0);
v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
v_npp = _mm512_set1_epi32(npp);
for (ii = 0; ii < 26; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1; */
/* if (ih <= nh) { */
/* j1 = ihole[2*(ih+noff)] - 1; */
/* } */
/* place overflow at end of array */
/* else { */
/* j1 = npp; */
/* npp += 1; */
/* } */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
v_npp);
v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
(int *)ihole,4);
v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
ih += 16;
nn = ih - nh;
if (nn > 0) {
nn = nn < 16 ? nn : 16;
npp += nn;
}
msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
ll = _mm512_mask2int(_mm512_knot(msk1));
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx) */
/* ppart[j1+nppmx*i+npoff] */
/* = ppbuff[j+ncoff+npbmx*i+nboff]; */
mm = j + ncoff + npbmx*i + nboff;
v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
if (ll==0) {
_mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
}
else {
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
v_x,4);
}
v_it = _mm512_add_epi32(v_it,v_m1);
}
if (ll != 0) {
ist = 1;
}
}
/* loop over remaining particles in this direction */
for (j = nps; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
ii = nh;
nn = ihole[2*(ii+noff)] - 1;
v_it0 = _mm512_set1_epi32(nn);
ih += 1;
j2 = ihole[2*(ih+noff)] - 1;
v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
v_mm = _mm512_add_epi32(v_mm,v_m2);
v_mm = _mm512_add_epi32(v_mm,v_mm);
v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1; */
/* if (j1==nn) { */
/* ii -= 1; */
/* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* } */
kk = 0;
for (ll = 0; ll < 16; ll++) {
j1 = npp - j - ll - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
}
else {
ls[kk] = j1;
kk += 1;
}
}
v_it = _mm512_load_epi32(ls);
v_it0 = _mm512_set1_epi32(kk);
msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
v_is = _mm512_add_epi32(v_is,v_m3);
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff] */
/* = ppart[j1+nppmx*i+npoff]; */
if (kk==16) {
v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
_mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
}
else {
v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
(float *)ppart,4);
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
v_x,4);
}
v_is = _mm512_add_epi32(v_is,v_m1);
v_it = _mm512_add_epi32(v_it,v_m1);
}
ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
}
/* loop over remaining particles */
if (nps < ip) {
nn = ihole[2*(ii+noff)] - 1;
j2 = ihole[2*(ih+noff)] - 1;
}
for (j = nps; j < ip; j++) {
j1 = npp - j - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+noff)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
}
npp -= ip;
}
kpic[l] = npp;
}
return;
}
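/*--------------------------------------------------------------------*/
/* illustrative sketch, hypothetical helper not called by the sorting
   routines in this file: the direction code ist computed above packs
   the three axes in base 3.  crossing the lower x edge contributes 1,
   the upper x edge 2, the y edges 3 or 6, and the z edges 9 or 18, so
   a departing particle gets a code in 1..26 and ist = 0 means it
   stays in its tile.  ncl[ist-1+26*l] then counts the particles
   leaving tile l with code ist.                                      */
static int sketch_direction_code(int cx, int cy, int cz) {
/* cx/cy/cz = 0 (no crossing), 1 (crossed lower edge), or 2 (crossed
   upper edge) in the x/y/z direction */
   return cx + 3*cy + 9*cz;
}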
/*--------------------------------------------------------------------*/
void ckncpp2order3lt(float ppart[], float ppbuff[], int kpic[],
int ncl[], int ihole[], int idimp, int nppmx,
int nx, int ny, int nz, int mx, int my, int mz,
int mx1, int my1, int mz1, int npbmx, int ntmax,
int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 3D linear memory
   the algorithm has 3 steps. first, one finds particles leaving tile and
   stores their number in each direction, location, and destination in ncl
and ihole. second, a prefix scan of ncl is performed and departing
particles are buffered in ppbuff in direction order. finally, we copy
the incoming particles from other tiles into ppart.
input: all except ppbuff, ncl, ihole, irc
output: ppart, ppbuff, kpic, ncl, ihole, irc
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppbuff[m][i][n] = i co-ordinate of particle n in tile m
kpic[m] = number of particles in tile m
ncl[m][i] = number of particles going to destination i, tile m
ihole[m][:][0] = location of hole in array left by departing particle
ihole[m][:][1] = direction destination of particle leaving hole
all for tile m
ihole[m][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart, ppbuff need to be 64 byte aligned
nppmx, npbmx need to be a multiple of 16
local data */
int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff;
int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dx, dy, dz;
int ks[26];
__m512i v_ist, v_it, v_0, v_1, v_3, v_9;
__m512i v_m1, v_m2, v_m3, v_m4, v_npp, v_mm, v_is, v_it0, v_ioff;
__m512 v_anx, v_any, v_anz;
__m512 v_dx, v_dy, v_dz, v_x;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 v_zero;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int ls[16], lm[32];
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
v_zero = _mm512_setzero_ps();
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \
dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \
msk2,ls,lm)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
ih = 0;
nh = 0;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
noff = (ntmax+1)*l;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* dx = ppart[j+npoff]; */
/* dy = ppart[j+nppmx+npoff]; */
/* dz = ppart[j+2*nppmx+npoff]; */
v_dx = _mm512_load_ps(&ppart[j+npoff]);
v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* find particles going out of bounds */
/* ist = 0; */
v_ist = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* ist = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
_mm512_store_ps(&ppart[j+npoff],v_x);
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* ist = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* ist = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_ist = _mm512_add_epi32(v_ist,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
_mm512_store_ps(&ppart[j+npoff],v_x);
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* ist += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
_mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* ist += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* ist += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_ist = _mm512_add_epi32(v_ist,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
_mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* ist += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* ist += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* ist += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_ist = _mm512_add_epi32(v_ist,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
}
}
/* increment counters */
/* if (ist > 0) { */
/* ncl[ist+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = ist; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(ls,v_ist);
/* remove zero ist values and left shift data */
ll = 0;
memset((void*)lm,0,32*sizeof(int));
for (i = 0; i < 16; i++) {
ist = ls[i];
if (ist > 0) {
lm[2*ll] = j + i + 1;
lm[1+2*ll] = ist;
ncl[ist+26*l-1] += 1;
ll += 1;
}
}
if (ll > 0) {
if ((ih+ll) > ntmax) {
nh = 1;
}
else {
v_it = _mm512_load_epi32(lm);
mm = 2*(ih+1+noff);
_mm512_packstorelo_epi32(&ihole[mm],v_it);
_mm512_packstorehi_epi32(&ihole[mm+16],v_it);
if (ll > 8) {
v_it = _mm512_load_epi32(&lm[16]);
mm += 16;
_mm512_packstorelo_epi32(&ihole[mm],v_it);
_mm512_packstorehi_epi32(&ihole[mm+16],v_it);
}
}
ih += ll;
}
}
/* loop over remaining particles in tile */
for (j = nps; j < npp; j++) {
dx = ppart[j+npoff];
dy = ppart[j+nppmx+npoff];
dz = ppart[j+2*nppmx+npoff];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
ppart[j+npoff] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0;
ppart[j+npoff] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[j+nppmx+npoff] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0;
ppart[j+nppmx+npoff] = dy;
}
else {
ist += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
ppart[j+2*nppmx+npoff] = dz - anz;
ist += 18;
}
else if (dz < edgelz) {
if (dz < 0.0) {
dz += anz;
if (dz < anz)
ist += 9;
else
dz = 0.0;
ppart[j+2*nppmx+npoff] = dz;
}
else {
ist += 9;
}
}
if (ist > 0) {
ncl[ist+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+noff)] = j + 1;
ihole[1+2*(ih+noff)] = ist;
}
else {
nh = 1;
}
}
}
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*noff] = ih;
}
/* ihole overflow */
if (*irc > 0)
return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
msk1 = _mm512_int2mask(1023);
v_m1 = _mm512_set1_epi32(nppmx);
v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,j1,ist,nh,ip,v_it,v_is,v_it0, \
v_mm,v_m2,v_m3,v_m4,lm)
for (l = 0; l < mxyz1; l++) {
npoff = idimp*nppmx*l;
nboff = idimp*npbmx*l;
noff = (ntmax+1)*l;
v_m2 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
v_m3 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
/* find address offset for ordered ppbuff array */
/* isum = 0; */
/* for (j = 0; j < 26; j++) { */
/* ist = ncl[j+26*l]; */
/* ncl[j+26*l] = isum; */
/* isum += ist; */
/* } */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
mm = 26*l;
v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
_mm512_store_epi32(lm,v_it);
v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
_mm512_store_epi32(&lm[16],v_is);
v_mm = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
for (j = 0; j < 32; j+=16) {
/* load data */
v_it0 = _mm512_load_epi32(&lm[j]);
/* first pass */
v_is = _mm512_shuffle_epi32(v_it0,177);
v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
v_it0,v_is);
/* second pass */
v_is = _mm512_shuffle_epi32(v_it,80);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
v_is);
/* third pass */
v_is = _mm512_permutevar_epi32(v_m2,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
v_is);
/* fourth pass */
v_is = _mm512_permutevar_epi32(v_m3,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
v_is);
/* add offset */
v_it = _mm512_add_epi32(v_it,v_mm);
/* next offset */
if (j==0) {
v_mm = _mm512_shuffle_epi32(v_it,255);
v_mm = _mm512_permute4f128_epi32(v_mm,255);
}
/* subtract for exclusive scan */
v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
_mm512_store_epi32(&lm[j],v_it);
}
/* store 26 data elements into ncl */
v_it = _mm512_load_epi32(lm);
v_is = _mm512_load_epi32(&lm[16]);
_mm512_packstorelo_epi32(&ncl[mm],v_it);
_mm512_packstorehi_epi32(&ncl[mm+16],v_it);
_mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
_mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
nh = ihole[2*noff];
nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
ip = 0;
v_m2 = _mm512_set1_epi32(noff+1);
v_m3 = _mm512_set1_epi32(npoff);
v_m4 = _mm512_set1_epi32(nboff);
v_it0 = _mm512_set1_epi32(npbmx);
/* loop over particles leaving tile in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* buffer particles that are leaving tile, in direction order */
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(j),v_ioff);
v_mm = _mm512_add_epi32(v_mm,v_m2);
v_mm = _mm512_add_epi32(v_mm,v_mm);
v_it = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
v_it = _mm512_sub_epi32(v_it,v_1);
v_mm = _mm512_add_epi32(v_mm,v_1);
v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
_mm512_store_epi32(lm,v_is);
for (ll = 0; ll < 16; ll++) {
ist = lm[ll];
ii = ncl[ist+26*l-1];
if (ii < npbmx) {
lm[ll] = ii;
}
else {
ip = 1;
}
ncl[ist+26*l-1] = ii + 1;
}
v_is = _mm512_load_epi32(lm);
v_it = _mm512_add_epi32(v_it,v_m3);
v_is = _mm512_add_epi32(v_is,v_m4);
if (ip==0) {
for (i = 0; i < idimp; i++) {
/* ppbuff[ii+npbmx*i+nboff] */
/* = ppart[j1+nppmx*i+npoff]; */
v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
_mm512_i32scatter_ps((float *)ppbuff,v_is,v_x,4);
v_it = _mm512_add_epi32(v_it,v_m1);
v_is = _mm512_add_epi32(v_is,v_it0);
}
}
/*
mm = 2*(j+1+noff);
v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
_mm512_store_epi32(lm,v_it);
mm += 16;
v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
_mm512_store_epi32(&lm[16],v_is);
for (ll = 0; ll < 16; ll++) {
j1 = lm[2*ll] - 1;
ist = lm[1+2*ll];
ii = ncl[ist+26*l-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ncl[ist+26*l-1] = ii + 1;
}
*/
}
/* loop over remaining particles leaving tile */
for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+noff)] - 1;
ist = ihole[1+2*(j+1+noff)];
ii = ncl[ist+26*l-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ncl[ist+26*l-1] = ii + 1;
}
/* set error */
if (ip > 0)
*irc = ncl[25+26*l];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
for (l = 0; l < mxyz1; l++) {
npp = kpic[l];
npoff = idimp*nppmx*l;
noff = (ntmax+1)*l;
v_m2 = _mm512_set1_epi32(noff+1);
v_m3 = _mm512_set1_epi32(npoff);
kz = l/mxy1;
k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
lk = kz*mxy1;
/* find tile behind */
ll = kz - 1;
if (ll < 0)
ll += mz1;
ll = ll*mxy1;
/* find tile in front */
lr = kz + 1;
if (lr >= mz1)
lr -= mz1;
lr = lr*mxy1;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1 ;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk + lk;
ks[1] = kxl + kk + lk;
ks[2] = kx + kr + lk;
ks[3] = kxr + kr + lk;
ks[4] = kxl + kr + lk;
ks[5] = kx + kl + lk;
ks[6] = kxr + kl + lk;
ks[7] = kxl + kl + lk;
ks[8] = kx + kk + lr;
ks[9] = kxr + kk + lr;
ks[10] = kxl + kk + lr;
ks[11] = kx + kr + lr;
ks[12] = kxr + kr + lr;
ks[13] = kxl + kr + lr;
ks[14] = kx + kl + lr;
ks[15] = kxr + kl + lr;
ks[16] = kxl + kl + lr;
ks[17] = kx + kk + ll;
ks[18] = kxr + kk + ll;
ks[19] = kxl + kk + ll;
ks[20] = kx + kr + ll;
ks[21] = kxr + kr + ll;
ks[22] = kxl + kr + ll;
ks[23] = kx + kl + ll;
ks[24] = kxr + kl + ll;
ks[25] = kxl + kl + ll;
/* loop over directions */
nh = ihole[2*noff];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
v_it0 = _mm512_set1_epi32(nh);
v_is = _mm512_add_epi32(v_m2,v_it0);
v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
v_npp = _mm512_set1_epi32(npp);
for (ii = 0; ii < 26; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1; */
/* if (ih <= nh) { */
/* j1 = ihole[2*(ih+noff)] - 1; */
/* } */
/* place overflow at end of array */
/* else { */
/* j1 = npp; */
/* npp += 1; */
/* } */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
v_npp);
v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
(int *)ihole,4);
v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
ih += 16;
nn = ih - nh;
if (nn > 0) {
nn = nn < 16 ? nn : 16;
npp += nn;
}
msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
ll = _mm512_mask2int(_mm512_knot(msk1));
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx) */
/* ppart[j1+nppmx*i+npoff] */
/* = ppbuff[j+ncoff+npbmx*i+nboff]; */
mm = j + ncoff + npbmx*i + nboff;
v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
if (ll==0) {
_mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
}
else {
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
v_x,4);
}
v_it = _mm512_add_epi32(v_it,v_m1);
}
if (ll != 0) {
ist = 1;
}
}
/* loop over remaining particles in this direction */
for (j = nps; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
ii = nh;
nn = ihole[2*(ii+noff)] - 1;
v_it0 = _mm512_set1_epi32(nn);
ih += 1;
j2 = ihole[2*(ih+noff)] - 1;
v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
v_mm = _mm512_add_epi32(v_mm,v_m2);
v_mm = _mm512_add_epi32(v_mm,v_mm);
v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1; */
/* if (j1==nn) { */
/* ii -= 1; */
/* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* } */
kk = 0;
for (ll = 0; ll < 16; ll++) {
j1 = npp - j - ll - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
}
else {
ls[kk] = j1;
kk += 1;
}
}
v_it = _mm512_load_epi32(ls);
v_it0 = _mm512_set1_epi32(kk);
msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
v_is = _mm512_add_epi32(v_is,v_m3);
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff] */
/* = ppart[j1+nppmx*i+npoff]; */
if (kk==16) {
v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
_mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
}
else {
v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
(float *)ppart,4);
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
v_x,4);
}
v_is = _mm512_add_epi32(v_is,v_m1);
v_it = _mm512_add_epi32(v_it,v_m1);
}
ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
}
/* loop over remaining particles */
if (nps < ip) {
nn = ihole[2*(ii+noff)] - 1;
j2 = ihole[2*(ih+noff)] - 1;
}
for (j = nps; j < ip; j++) {
j1 = npp - j - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+noff)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
}
npp -= ip;
}
kpic[l] = npp;
}
return;
}
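/*--------------------------------------------------------------------*/
/* illustrative sketch, hypothetical helper not called by the sorting
   routines above: the ks[0..25] table built in the copy step lists
   the 26 periodic neighbors of tile l in the mx1 x my1 x mz1 grid of
   tiles.  for offsets ox, oy, oz in {-1,0,+1}, not all zero, the
   neighbor index can be computed as follows.                         */
static int sketch_neighbor_tile(int l, int ox, int oy, int oz, int mx1,
                                int my1, int mz1) {
/* local data */
   int mxy1, kx, ky, kz;
   mxy1 = mx1*my1;
/* decompose the tile index, as in the routines above */
   kz = l/mxy1;
   ky = (l - mxy1*kz)/mx1;
   kx = l - mxy1*kz - mx1*ky;
/* apply the offset with periodic wrap-around */
   kx = (kx + ox + mx1)%mx1;
   ky = (ky + oy + my1)%my1;
   kz = (kz + oz + mz1)%mz1;
   return kx + mx1*ky + mxy1*kz;
}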
/*--------------------------------------------------------------------*/
void cknccguard3l(float fxyz[], int nx, int ny, int nz, int nxe,
int nye, int nze) {
/* replicate extended periodic vector field fxyz
linear interpolation
   nx/ny/nz = system length in x/y/z direction
nxe = first dimension of field arrays, must be >= nx+1
nye = second dimension of field arrays, must be >= ny+1
nze = third dimension of field arrays, must be >= nz+1
requires KNC, fxyz needs to be 64 byte aligned
nxe needs to be a multiple of 4
local data */
int j, k, l, nxs, nxyen, ll;
nxs = 4*(nx/4);
nxyen = 4*nxe*nye;
/* copy edges of extended field */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,ll)
for (l = 0; l < nz; l++) {
ll = nxyen*l;
for (k = 0; k < ny; k++) {
fxyz[4*nx+4*nxe*k+ll] = fxyz[4*nxe*k+ll];
fxyz[1+4*nx+4*nxe*k+ll] = fxyz[1+4*nxe*k+ll];
fxyz[2+4*nx+4*nxe*k+ll] = fxyz[2+4*nxe*k+ll];
}
/* vector loop over elements in blocks of 4 */
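      /* mask 30583 = 0x7777 selects components 0-2 of each 4-component */
      /* field element, so the masked store leaves the 4th component of */
      /* fxyz unchanged                                                 */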
for (j = 0; j < nxs; j+=4) {
_mm512_mask_store_ps(&fxyz[4*j+4*nxe*ny+ll],
_mm512_int2mask(30583),_mm512_load_ps(&fxyz[4*j+ll]));
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
fxyz[4*j+4*nxe*ny+ll] = fxyz[4*j+ll];
fxyz[1+4*j+4*nxe*ny+ll] = fxyz[1+4*j+ll];
fxyz[2+4*j+4*nxe*ny+ll] = fxyz[2+4*j+ll];
}
fxyz[4*nx+4*nxe*ny+ll] = fxyz[ll];
fxyz[1+4*nx+4*nxe*ny+ll] = fxyz[1+ll];
fxyz[2+4*nx+4*nxe*ny+ll] = fxyz[2+ll];
}
#pragma omp for \
private(j,k)
for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
for (j = 0; j < nxs; j+=4) {
_mm512_mask_store_ps(&fxyz[4*j+4*nxe*k+nxyen*nz],
_mm512_int2mask(30583),_mm512_load_ps(&fxyz[4*j+4*nxe*k]));
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
fxyz[4*j+4*nxe*k+nxyen*nz] = fxyz[4*j+4*nxe*k];
fxyz[1+4*j+4*nxe*k+nxyen*nz] = fxyz[1+4*j+4*nxe*k];
fxyz[2+4*j+4*nxe*k+nxyen*nz] = fxyz[2+4*j+4*nxe*k];
}
fxyz[4*nx+4*nxe*k+nxyen*nz] = fxyz[4*nxe*k];
fxyz[1+4*nx+4*nxe*k+nxyen*nz] = fxyz[1+4*nxe*k];
fxyz[2+4*nx+4*nxe*k+nxyen*nz] = fxyz[2+4*nxe*k];
}
}
/* vector loop over elements in blocks of 4 */
for (j = 0; j < nxs; j+=4) {
_mm512_mask_store_ps(&fxyz[4*j+4*nxe*ny+nxyen*nz],
_mm512_int2mask(30583),_mm512_load_ps(&fxyz[4*j]));
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
fxyz[4*j+4*nxe*ny+nxyen*nz] = fxyz[4*j];
fxyz[1+4*j+4*nxe*ny+nxyen*nz] = fxyz[1+4*j];
fxyz[2+4*j+4*nxe*ny+nxyen*nz] = fxyz[2+4*j];
}
fxyz[4*nx+4*nxe*ny+nxyen*nz] = fxyz[0];
fxyz[1+4*nx+4*nxe*ny+nxyen*nz] = fxyz[1];
fxyz[2+4*nx+4*nxe*ny+nxyen*nz] = fxyz[2];
return;
}
/*--------------------------------------------------------------------*/
void ckncacguard3l(float cu[], int nx, int ny, int nz, int nxe, int nye,
int nze) {
/* accumulate extended periodic field cu
linear interpolation
   nx/ny/nz = system length in x/y/z direction
nxe = first dimension of field arrays, must be >= nx+1
nye = second dimension of field arrays, must be >= ny+1
nze = third dimension of field arrays, must be >= nz+1
   requires KNC, cu needs to be 64 byte aligned
nxe needs to be a multiple of 4
local data */
int j, k, l, nxs, nxyen, ll;
__m512 v_cu, v_zero;
nxs = 4*(nx/4);
nxyen = 4*nxe*nye;
v_zero = _mm512_set1_ps(0.0f);
/* accumulate edges of extended field */
#pragma omp parallel
{
#pragma omp for \
private(j,k,l,ll,v_cu)
for (l = 0; l < nz; l++) {
ll = nxyen*l;
for (k = 0; k < ny; k++) {
cu[4*nxe*k+ll] += cu[4*nx+4*nxe*k+ll];
cu[1+4*nxe*k+ll] += cu[1+4*nx+4*nxe*k+ll];
cu[2+4*nxe*k+ll] += cu[2+4*nx+4*nxe*k+ll];
cu[4*nx+4*nxe*k+ll] = 0.0;
cu[1+4*nx+4*nxe*k+ll] = 0.0;
cu[2+4*nx+4*nxe*k+ll] = 0.0;
}
/* vector loop over elements in blocks of 4 */
for (j = 0; j < nxs; j+=4) {
v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+ll]);
v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+ll]),v_cu);
_mm512_store_ps(&cu[4*j+ll],v_cu);
_mm512_store_ps(&cu[4*j+4*nxe*ny+ll],v_zero);
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
cu[4*j+ll] += cu[4*j+4*nxe*ny+ll];
cu[1+4*j+ll] += cu[1+4*j+4*nxe*ny+ll];
cu[2+4*j+ll] += cu[2+4*j+4*nxe*ny+ll];
cu[4*j+4*nxe*ny+ll] = 0.0;
cu[1+4*j+4*nxe*ny+ll] = 0.0;
cu[2+4*j+4*nxe*ny+ll] = 0.0;
}
cu[ll] += cu[4*nx+4*nxe*ny+ll];
cu[1+ll] += cu[1+4*nx+4*nxe*ny+ll];
cu[2+ll] += cu[2+4*nx+4*nxe*ny+ll];
cu[4*nx+4*nxe*ny+ll] = 0.0;
cu[1+4*nx+4*nxe*ny+ll] = 0.0;
cu[2+4*nx+4*nxe*ny+ll] = 0.0;
}
#pragma omp for \
private(j,k,v_cu)
for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
for (j = 0; j < nxs; j+=4) {
v_cu = _mm512_load_ps(&cu[4*j+4*nxe*k+nxyen*nz]);
v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+4*nxe*k]),v_cu);
_mm512_store_ps(&cu[4*j+4*nxe*k],v_cu);
_mm512_store_ps(&cu[4*j+4*nxe*k+nxyen*nz],v_zero);
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
cu[4*j+4*nxe*k] += cu[4*j+4*nxe*k+nxyen*nz];
cu[1+4*j+4*nxe*k] += cu[1+4*j+4*nxe*k+nxyen*nz];
cu[2+4*j+4*nxe*k] += cu[2+4*j+4*nxe*k+nxyen*nz];
cu[4*j+4*nxe*k+nxyen*nz] = 0.0;
cu[1+4*j+4*nxe*k+nxyen*nz] = 0.0;
cu[2+4*j+4*nxe*k+nxyen*nz] = 0.0;
}
cu[4*nxe*k] += cu[4*nx+4*nxe*k+nxyen*nz];
cu[1+4*nxe*k] += cu[1+4*nx+4*nxe*k+nxyen*nz];
cu[2+4*nxe*k] += cu[2+4*nx+4*nxe*k+nxyen*nz];
cu[4*nx+4*nxe*k+nxyen*nz] = 0.0;
cu[1+4*nx+4*nxe*k+nxyen*nz] = 0.0;
cu[2+4*nx+4*nxe*k+nxyen*nz] = 0.0;
}
}
/* vector loop over elements in blocks of 4 */
for (j = 0; j < nxs; j+=4) {
v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+nxyen*nz]);
v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j]),v_cu);
_mm512_store_ps(&cu[4*j],v_cu);
_mm512_store_ps(&cu[4*j+4*nxe*ny+nxyen*nz],v_zero);
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
cu[4*j] += cu[4*j+4*nxe*ny+nxyen*nz];
cu[1+4*j] += cu[1+4*j+4*nxe*ny+nxyen*nz];
cu[2+4*j] += cu[2+4*j+4*nxe*ny+nxyen*nz];
cu[4*j+4*nxe*ny+nxyen*nz] = 0.0;
cu[1+4*j+4*nxe*ny+nxyen*nz] = 0.0;
cu[2+4*j+4*nxe*ny+nxyen*nz] = 0.0;
}
cu[0] += cu[4*nx+4*nxe*ny+nxyen*nz];
cu[1] += cu[1+4*nx+4*nxe*ny+nxyen*nz];
cu[2] += cu[2+4*nx+4*nxe*ny+nxyen*nz];
cu[4*nx+4*nxe*ny+nxyen*nz] = 0.0;
cu[1+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
cu[2+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
return;
}
/*--------------------------------------------------------------------*/
void ckncaguard3l(float q[], int nx, int ny, int nz, int nxe, int nye,
int nze) {
/* accumulate extended periodic scalar field q
linear interpolation
   nx/ny/nz = system length in x/y/z direction
nxe = first dimension of field arrays, must be >= nx+1
nye = second dimension of field arrays, must be >= ny+1
nze = third dimension of field arrays, must be >= nz+1
requires KNC, q needs to be 64 byte aligned
nxe needs to be a multiple of 16
local data */
int j, k, l, nxs, nxye, ll;
__m512 v_q;
nxs = 16*(nx/16);
nxye = nxe*nye;
/* accumulate edges of extended field */
#pragma omp parallel
{
#pragma omp for \
private(j,k,l,ll,v_q)
for (l = 0; l < nz; l++) {
ll = nxye*l;
for (k = 0; k < ny; k++) {
q[nxe*k+ll] += q[nx+nxe*k+ll];
q[nx+nxe*k+ll] = 0.0;
}
/* vector loop over elements in blocks of 16 */
for (j = 0; j < nxs; j+=16) {
v_q = _mm512_load_ps(&q[j+nxe*ny+ll]);
v_q = _mm512_add_ps(_mm512_load_ps(&q[j+ll]),v_q);
_mm512_store_ps(&q[j+ll],v_q);
_mm512_store_ps(&q[j+nxe*ny+ll],_mm512_setzero_ps());
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
q[j+ll] += q[j+nxe*ny+ll];
q[j+nxe*ny+ll] = 0.0;
}
q[ll] += q[nx+nxe*ny+ll];
q[nx+nxe*ny+ll] = 0.0;
}
#pragma omp for \
private(j,k,v_q)
for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 16 */
for (j = 0; j < nxs; j+=16) {
v_q = _mm512_load_ps(&q[j+nxe*k+nxye*nz]);
v_q = _mm512_add_ps(_mm512_load_ps(&q[j+nxe*k]),v_q);
_mm512_store_ps(&q[j+nxe*k],v_q);
_mm512_store_ps(&q[j+nxe*k+nxye*nz],_mm512_setzero_ps());
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
q[j+nxe*k] += q[j+nxe*k+nxye*nz];
q[j+nxe*k+nxye*nz] = 0.0;
}
q[nxe*k] += q[nx+nxe*k+nxye*nz];
q[nx+nxe*k+nxye*nz] = 0.0;
}
}
/* vector loop over elements in blocks of 16 */
for (j = 0; j < nxs; j+=16) {
v_q = _mm512_load_ps(&q[j+nxe*ny+nxye*nz]);
v_q = _mm512_add_ps(_mm512_load_ps(&q[j]),v_q);
_mm512_store_ps(&q[j],v_q);
_mm512_store_ps(&q[j+nxe*ny+nxye*nz],_mm512_setzero_ps());
}
/* loop over remaining elements */
for (j = nxs; j < nx; j++) {
q[j] += q[j+nxe*ny+nxye*nz];
q[j+nxe*ny+nxye*nz] = 0.0;
}
q[0] += q[nx+nxe*ny+nxye*nz];
q[nx+nxe*ny+nxye*nz] = 0.0;
return;
}
/*--------------------------------------------------------------------*/
void ckncmpois33(float complex q[], float complex fxyz[], int isign,
float complex ffc[], float ax, float ay, float az,
float affp, float *we, int nx, int ny, int nz,
int nxvh, int nyv, int nzv, int nxhd, int nyhd,
int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions.
for isign = 0, output: ffc
input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
for isign = -1, output: fxyz, we
input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
approximate flop count is:
59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc)
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
if isign = 0, form factor array is prepared
if isign is not equal to 0, force/charge is calculated
equation used is:
fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx],
fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx],
fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx],
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers,
g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx],
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0,
   fx(ky=pi) = fy(ky=pi) = fz(ky=pi) = 0,
fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0,
fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0.
q[l][k][j] = complex charge density for fourier mode (j,k,l)
fxyz[l][k][j][0] = x component of complex force/charge
fxyz[l][k][j][1] = y component of complex force/charge
fxyz[l][k][j][2] = z component of complex force/charge
all for fourier mode (j,k,l)
cimag(ffc[l][k][j]) = finite-size particle shape factor s
for fourier mode (j,k,l)
creal(ffc[l][k][j]) = potential green's function g
for fourier mode (j,k,l)
ax/ay/az = half-width of particle in x/y/z direction
affp = normalization constant = nx*ny*nz/np,
where np=number of particles
electric field energy is also calculated, using
we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*
|q[kz][ky][kx]*s[kz][ky][kx]|**2)
nx/ny/nz = system length in x/y/z direction
nxvh = first dimension of field arrays, must be >= nxh
nyv = second dimension of field arrays, must be >= ny
nzv = third dimension of field arrays, must be >= nz
nxhd = first dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
nzhd = third dimension of form factor array, must be >= nzh
   requires KNC, q, fxyz, ffc need to be 64 byte aligned
nxhd, nxvh need to be a multiple of 8
fxyz needs to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6;
float complex zero, zt1, zt2;
double wp, sum1, sum2;
__m512i v_j, v_it, v_perm;
__m512 v_dnx, v_dny, v_dnz, v_dky, v_dkz, v_at1, v_at2, v_at3, v_at4;
__m512 v_zero, v_zt1, v_zt2, v_zt3, v_zt4;
__m512 a, b, c, d, e, f, g, h;
__m512d v_wp, v_d;
__attribute__((aligned(64))) double dd[8];
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 8*(nxh/8);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
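/* v_perm = lane index pattern used to complete the 4x16 transposes of the
   fxyz components below */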
v_perm = _mm512_set_epi32(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0);
if (isign != 0)
goto L40;
/* prepare form factor array */
for (l = 0; l < nzh; l++) {
dkz = dnz*(float) l;
ll = nxyhd*l;
at1 = dkz*dkz;
at2 = pow((dkz*az),2);
for (k = 0; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
at3 = dky*dky + at1;
at4 = pow((dky*ay),2) + at2;
for (j = 0; j < nxh; j++) {
dkx = dnx*(float) j;
at5 = dkx*dkx + at3;
at6 = exp(-0.5*(pow((dkx*ax),2) + at4));
if (at5==0.0) {
ffc[j+kk+ll] = affp + 1.0*_Complex_I;
}
else {
ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I;
}
}
}
}
return;
/* calculate force/charge and sum field energy */
L40: sum1 = 0.0;
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp, \
v_it,v_dky,v_dkz,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,a,b, \
c,d,e,f,g,h,v_d,v_wp,dd) \
reduction(+:sum1)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(
_mm512_set1_epi32(l),_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+kk+ll]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at3 = dky*at1; */
v_at3 = _mm512_mul_ps(v_dky,v_at1);
/* at4 = dkz*at1; */
v_at4 = _mm512_mul_ps(v_dkz,v_at1);
/* zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj+lj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1+lj]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj+lj)] = at2*zt1; */
/* fxyz[1+4*(j+kj+lj)] = at3*zt1; */
/* fxyz[2+4*(j+kj+lj)] = at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = _mm512_mul_ps(v_at4,v_zt1);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+lj)],d);
/* fxyz[4*(j+k1+lj)] = at2*zt2; */
/* fxyz[1+4*(j+k1+lj)] = -at3*zt2; */
/* fxyz[2+4*(j+k1+lj)] = at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
c = _mm512_mul_ps(v_at4,v_zt2);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],d);
/* wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) */
/* + q[j+k1+lj]*conjf(q[j+k1+lj])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj+l1]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1+l1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj+l1)] = at2*zt1; */
/* fxyz[1+4*(j+kj+l1)] = at3*zt1; */
/* fxyz[2+4*(j+kj+l1)] = -at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt1));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],d);
/* fxyz[4*(j+k1+l1)] = at2*zt2; */
/* fxyz[1+4*(j+k1+l1)] = -at3*zt2; */
/* fxyz[2+4*(j+k1+l1)] = -at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],d);
/* wp += at1*(q[j+kj+l1]*conjf(q[j+kj+l1]) */
/* + q[j+k1+l1]*conjf(q[j+k1+l1])); */
v_zt4 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt4 = _mm512_add_ps(v_zt4,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_at1,v_zt4));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at4 = dkz*at1;
zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I;
zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I;
fxyz[4*(j+kj+lj)] = at2*zt1;
fxyz[1+4*(j+kj+lj)] = at3*zt1;
fxyz[2+4*(j+kj+lj)] = at4*zt1;
fxyz[4*(j+k1+lj)] = at2*zt2;
fxyz[1+4*(j+k1+lj)] = -at3*zt2;
fxyz[2+4*(j+k1+lj)] = at4*zt2;
zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I;
zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I;
fxyz[4*(j+kj+l1)] = at2*zt1;
fxyz[1+4*(j+kj+l1)] = at3*zt1;
fxyz[2+4*(j+kj+l1)] = -at4*zt1;
fxyz[4*(j+k1+l1)] = at2*zt2;
fxyz[1+4*(j+k1+l1)] = -at3*zt2;
fxyz[2+4*(j+k1+l1)] = -at4*zt2;
at1 = at1*(q[j+kj+lj]*conjf(q[j+kj+lj])
+ q[j+k1+lj]*conjf(q[j+k1+lj])
+ q[j+kj+l1]*conjf(q[j+kj+l1])
+ q[j+k1+l1]*conjf(q[j+k1+l1]));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]);
at3 = at1*dny*(float) k;
at4 = dkz*at1;
zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I;
zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I;
fxyz[4*(kj+lj)] = zero;
fxyz[1+4*(kj+lj)] = at3*zt1;
fxyz[2+4*(kj+lj)] = at4*zt1;
fxyz[4*(k1+lj)] = zero;
fxyz[1+4*(k1+lj)] = zero;
fxyz[2+4*(k1+lj)] = zero;
fxyz[4*(kj+l1)] = zero;
fxyz[1+4*(kj+l1)] = at3*zt2;
fxyz[2+4*(kj+l1)] = -at4*zt2;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[kj+lj]*conjf(q[kj+lj])
+ q[kj+l1]*conjf(q[kj+l1]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+ll]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at4 = dkz*at1; */
v_at4 = _mm512_mul_ps(v_dkz,v_at1);
/* zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+lj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+l1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+lj)] = at2*zt1; */
/* fxyz[1+4*(j+lj)] = zero; */
/* fxyz[2+4*(j+lj)] = at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = v_zero;
c = _mm512_mul_ps(v_at4,v_zt1);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,
78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,
78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),
b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+lj)],d);
/* fxyz[4*(j+k1+lj)] = zero; */
/* fxyz[1+4*(j+k1+lj)] = zero; */
/* fxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],v_zero);
/* fxyz[4*(j+l1)] = at2*zt2; */
/* fxyz[1+4*(j+l1)] = zero; */
/* fxyz[2+4*(j+l1)] = -at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = v_zero;
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,
78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,
78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),
b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+l1)],d);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j+lj]*conjf(q[j+lj]) */
/* + q[j+l1]*conjf(q[j+l1])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]);
at2 = at1*dnx*(float) j;
at4 = dkz*at1;
zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I;
zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I;
fxyz[4*(j+lj)] = at2*zt1;
fxyz[1+4*(j+lj)] = zero;
fxyz[2+4*(j+lj)] = at4*zt1;
fxyz[4*(j+k1+lj)] = zero;
fxyz[1+4*(j+k1+lj)] = zero;
fxyz[2+4*(j+k1+lj)] = zero;
fxyz[4*(j+l1)] = at2*zt2;
fxyz[1+4*(j+l1)] = zero;
fxyz[2+4*(j+l1)] = -at4*zt2;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j+lj]*conjf(q[j+lj])
+ q[j+l1]*conjf(q[j+l1]));
wp += (double) at1;
}
/* mode numbers kx = 0, nx/2 */
at1 = crealf(ffc[ll])*cimagf(ffc[ll]);
at4 = dkz*at1;
zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I;
fxyz[4*lj] = zero;
fxyz[1+4*lj] = zero;
fxyz[2+4*lj] = at4*zt1;
fxyz[4*(k1+lj)] = zero;
fxyz[1+4*(k1+lj)] = zero;
fxyz[2+4*(k1+lj)] = zero;
fxyz[4*l1] = zero;
fxyz[1+4*l1] = zero;
fxyz[2+4*l1] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[lj]*conjf(q[lj]));
wp += (double) at1;
/* sum1 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum1 += (wp + dd[0]);
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
sum2 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp,v_it,v_dky,v_at1, \
v_at2,v_at3,v_zt1,v_zt2,v_zt3,a,b,c,d,e,f,g,h,v_d,v_wp,dd) \
reduction(+:sum2)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
wp = 0.0;
v_wp = _mm512_set1_pd(0.0);
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+kk]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at3 = dky*at1; */
v_at3 = _mm512_mul_ps(v_dky,v_at1);
/* zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj)] = at2*zt1; */
/* fxyz[1+4*(j+kj)] = at3*zt1; */
/* fxyz[2+4*(j+kj)] = zero; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = v_zero;
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),b,
78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj)],d);
/* fxyz[4*(j+k1)] = at2*zt2; */
/* fxyz[1+4*(j+k1)] = -at3*zt2; */
/* fxyz[2+4*(j+k1)] = zero; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
            c = v_zero;
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),b,
78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1)],d);
/* fxyz[4*(j+kj+l1)] = zero; */
/* fxyz[1+4*(j+kj+l1)] = zero; */
/* fxyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],v_zero);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
fxyz[4*(j+kj)] = at2*zt1;
fxyz[1+4*(j+kj)] = at3*zt1;
fxyz[2+4*(j+kj)] = zero;
fxyz[4*(j+k1)] = at2*zt2;
fxyz[1+4*(j+k1)] = -at3*zt2;
fxyz[2+4*(j+k1)] = zero;
fxyz[4*(j+kj+l1)] = zero;
fxyz[1+4*(j+kj+l1)] = zero;
fxyz[2+4*(j+kj+l1)] = zero;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
wp += (double) at1;
}
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
}
/* mode numbers kx = 0, nx/2 */
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
at3 = at1*dny*(float) k;
zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
fxyz[4*kj] = zero;
fxyz[1+4*kj] = at3*zt1;
fxyz[2+4*kj] = zero;
fxyz[4*k1] = zero;
fxyz[1+4*k1] = zero;
fxyz[2+4*k1] = zero;
fxyz[4*(kj+l1)] = zero;
fxyz[1+4*(kj+l1)] = zero;
fxyz[2+4*(kj+l1)] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[kj]*conjf(q[kj]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j])*cimagf(ffc[j]); */
v_at1 = _mm512_load_ps((float *)&ffc[j]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),v_zero,
v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),v_zero);
}
/* fxyz[4*j] = at2*zt1; */
/* fxyz[1+4*j] = zero; */
/* fxyz[2+4*j] = zero; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = v_zero;
c = v_zero;
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),v_zero,
177);
b = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),v_zero,
177);
d = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*j],a);
_mm512_store_ps((float *)&fxyz[8+4*j],b);
_mm512_store_ps((float *)&fxyz[16+4*j],c);
_mm512_store_ps((float *)&fxyz[24+4*j],d);
/* fxyz[4*(j+k1)] = zero; */
/* fxyz[1+4*(j+k1)] = zero; */
/* fxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1)],v_zero);
/* fxyz[4*(j+l1)] = zero; */
/* fxyz[1+4*(j+l1)] = zero; */
/* fxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+l1)],v_zero);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j]*conjf(q[j])); */
v_zt3 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_zt1,v_zt1));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j])*cimagf(ffc[j]);
at2 = at1*dnx*(float) j;
zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
fxyz[4*j] = at2*zt1;
fxyz[1+4*j] = zero;
fxyz[2+4*j] = zero;
fxyz[4*(j+k1)] = zero;
fxyz[1+4*(j+k1)] = zero;
fxyz[2+4*(j+k1)] = zero;
fxyz[4*(j+l1)] = zero;
fxyz[1+4*(j+l1)] = zero;
fxyz[2+4*(j+l1)] = zero;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j]*conjf(q[j]));
wp += (double) at1;
}
fxyz[0] = zero;
fxyz[1] = zero;
fxyz[2] = zero;
fxyz[4*k1] = zero;
fxyz[1+4*k1] = zero;
fxyz[2+4*k1] = zero;
fxyz[4*l1] = zero;
fxyz[1+4*l1] = zero;
fxyz[2+4*l1] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
/* *we = wp*((float) nx)*((float) ny)*((float) nz); */
*we = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz);
return;
}
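/*--------------------------------------------------------------------*/
/* illustrative scalar sketch (not part of the library interface): the
   per-mode force/charge update described in the ckncmpois33 comments
   above, mirroring its scalar remainder loop.  the helper name and
   argument layout are assumptions made for this example only; dkx,
   dky, dkz are the precomputed wavenumbers of one interior fourier
   mode and ffcjkl its form factor (crealf = g, cimagf = s).  it uses
   only the complex arithmetic facilities already included by this
   file and is marked unused because nothing in the library calls it. */
__attribute__((unused))
static void kncm_pois33_mode_sketch(float complex qjkl,
                                    float complex ffcjkl, float dkx,
                                    float dky, float dkz,
                                    float complex fxyz[3]) {
/* at1 = g*s for this mode */
   float at1 = crealf(ffcjkl)*cimagf(ffcjkl);
/* zt1 = -sqrt(-1)*q, written as cimag(q) - creal(q)*i as in the loop */
   float complex zt1 = cimagf(qjkl) - crealf(qjkl)*_Complex_I;
   fxyz[0] = (at1*dkx)*zt1;
   fxyz[1] = (at1*dky)*zt1;
   fxyz[2] = (at1*dkz)*zt1;
}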
/*--------------------------------------------------------------------*/
void ckncmcuperp3(float complex cu[], int nx, int ny, int nz, int nxvh,
int nyv, int nzv) {
/* this subroutine calculates the transverse current in fourier space
input: all, output: cu
approximate flop count is:
100*nxc*nyc*nzc + 36*(nxc*nyc + nxc*nzc + nyc*nzc)
and (nx/2)*nyc*nzc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
the transverse current is calculated using the equation:
cux[kz][ky][kx] = cux[kz][ky][kx]
- kx*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
+ kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
cuy[kz][ky][kx] = cuy[kz][ky][kx]
- ky*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
+ kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
cuz[kz][ky][kx] = cuz[kz][ky][kx]
- kz*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
+ kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers, except for
cux(kx=pi) = cuy(kx=pi) = cuz(kx=pi) = 0,
cux(ky=pi) = cuy(ky=pi) = cuz(ky=pi) = 0,
cux(kz=pi) = cuy(kz=pi) = cuz(kz=pi) = 0,
cux(kx=0,ky=0,kz=0) = cuy(kx=0,ky=0,kz=0) = cuz(kx=0,ky=0,kz=0) = 0.
cu[l][k][j][i] = complex current density for fourier mode (j,k,l)
nx/ny/nz = system length in x/y/z direction
nxvh = second dimension of field arrays, must be >= nxh
nyv = third dimension of field arrays, must be >= ny
nzv = fourth dimension of field arrays, must be >= nz
requires KNC, cu needs to be 64 byte aligned
nxvh needs to be a multiple of 2
cu needs to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kj, lj, nxvyh;
float dnx, dny, dnz, dkx, dky, dkz, dky2, dkz2, dkyz2, at1;
float complex zero, zt1;
__m512i v_j, v_it;
__m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_dkz2, v_dkyz2;
__m512 v_dk, v_at1, v_zt1, v_zt2, v_zero, v_one, v_at, v_as;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_one = _mm512_set1_ps(1.0f);
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,lj,kj,dkx,dky,dkz,dkz2,dkyz2,at1,zt1,v_it,v_dk, \
v_dkx,v_dky,v_dkz,v_dkz2,v_dkyz2,v_at1,v_zt1,v_zt2,v_at,v_as)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
dkz2 = dkz*dkz;
v_dkz2 = _mm512_set1_ps(dkz2);
/* add kz to gradient operator */
v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkz);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kj = nxvh*k;
k1 = nxvh*ny - kj;
dkyz2 = dky*dky + dkz2;
v_dkyz2 = _mm512_fmadd_ps(v_dky,v_dky,v_dkz2);
/* add ky to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dkyz2); */
v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2);
v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)] */
/* + dkz*cu[2+4*(j+kj+lj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+kj+lj)] -= dkx*zt1; */
/* cu[1+4*(j+kj+lj)] -= dky*zt1; */
/* cu[2+4*(j+kj+lj)] -= dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+kj+lj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)] */
/* + dkz*cu[2+4*(j+k1+lj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero,
v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+k1+lj)] -= dkx*zt1; */
/* cu[1+4*(j+k1+lj)] += dky*zt1; */
/* cu[2+4*(j+k1+lj)] -= dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)] */
/* - dkz*cu[2+4*(j+kj+l1)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336),
v_zero,v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+kj+l1)] -= dkx*zt1; */
/* cu[1+4*(j+kj+l1)] -= dky*zt1; */
/* cu[2+4*(j+kj+l1)] += dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)] */
/* - dkz*cu[2+4*(j+k1+l1)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(15420),
v_zero,v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+k1+l1)] -= dkx*zt1; */
/* cu[1+4*(j+k1+l1)] += dky*zt1; */
/* cu[2+4*(j+k1+l1)] += dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1.0/(dkx*dkx + dkyz2);
zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)]
+ dkz*cu[2+4*(j+kj+lj)]);
cu[4*(j+kj+lj)] -= dkx*zt1;
cu[1+4*(j+kj+lj)] -= dky*zt1;
cu[2+4*(j+kj+lj)] -= dkz*zt1;
zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)]
+ dkz*cu[2+4*(j+k1+lj)]);
cu[4*(j+k1+lj)] -= dkx*zt1;
cu[1+4*(j+k1+lj)] += dky*zt1;
cu[2+4*(j+k1+lj)] -= dkz*zt1;
zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)]
- dkz*cu[2+4*(j+kj+l1)]);
cu[4*(j+kj+l1)] -= dkx*zt1;
cu[1+4*(j+kj+l1)] -= dky*zt1;
cu[2+4*(j+kj+l1)] += dkz*zt1;
zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)]
- dkz*cu[2+4*(j+k1+l1)]);
cu[4*(j+k1+l1)] -= dkx*zt1;
cu[1+4*(j+k1+l1)] += dky*zt1;
cu[2+4*(j+k1+l1)] += dkz*zt1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kj = nxvh*k;
k1 = nxvh*ny - kj;
dky = dny*(float) k;
at1 = 1.0/(dky*dky + dkz2);
zt1 = at1*(dky*cu[1+4*(kj+lj)] + dkz*cu[2+4*(kj+lj)]);
cu[1+4*(kj+lj)] -= dky*zt1;
cu[2+4*(kj+lj)] -= dkz*zt1;
cu[4*(k1+lj)] = zero;
cu[1+4*(k1+lj)] = zero;
cu[2+4*(k1+lj)] = zero;
zt1 = at1*(dky*cu[1+4*(kj+l1)] - dkz*cu[2+4*(kj+l1)]);
cu[1+4*(kj+l1)] -= dky*zt1;
cu[2+4*(kj+l1)] += dkz*zt1;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* add ky to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_zero);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dkz2); */
v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkz2);
v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+lj)] -= dkx*zt1; */
/* cu[2+4*(j+lj)] -= dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+lj)],v_zt2);
/* cu[4*(j+k1+lj)] = zero; */
/* cu[1+4*(j+k1+lj)] = zero; */
/* cu[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zero);
/* zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336),
v_zero,v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+l1)] -= dkx*zt1; */
/* cu[2+4*(j+l1)] += dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+l1)],v_zt2);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1.0/(dkx*dkx + dkz2);
zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]);
cu[4*(j+lj)] -= dkx*zt1;
cu[2+4*(j+lj)] -= dkz*zt1;
cu[4*(j+k1+lj)] = zero;
cu[1+4*(j+k1+lj)] = zero;
cu[2+4*(j+k1+lj)] = zero;
zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]);
cu[4*(j+l1)] -= dkx*zt1;
cu[2+4*(j+l1)] += dkz*zt1;
cu[4*(j+k1+l1)] = zero;
cu[1+4*(j+k1+l1)] = zero;
cu[2+4*(j+k1+l1)] = zero;
}
/* mode numbers kx = 0, nx/2 */
cu[2+4*lj] = zero;
cu[4*(k1+lj)] = zero;
cu[1+4*(k1+lj)] = zero;
cu[2+4*(k1+lj)] = zero;
cu[4*l1] = zero;
cu[1+4*l1] = zero;
cu[2+4*l1] = zero;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
#pragma omp parallel for \
private(j,k,k1,kj,dky,dky2,dkx,at1,zt1,v_it,v_dk,v_dkx,v_dky,v_dkyz2, \
v_at1,v_zt1,v_zt2,v_at,v_as)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kj = nxvh*k;
k1 = nxvh*ny - kj;
dky2 = dky*dky;
v_dkyz2 = _mm512_mul_ps(v_dky,v_dky);
/* add ky to gradient operator */
v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dky2); */
v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2);
v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+kj)] -= dkx*zt1; */
/* cu[1+4*(j+kj)] -= dky*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+kj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero,
v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                    v_zero);
}
/* cu[4*(j+k1)] -= dkx*zt1; */
/* cu[1+4*(j+k1)] += dky*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+k1)],v_zt2);
/* cu[4*(j+kj+l1)] = zero; */
/* cu[1+4*(j+kj+l1)] = zero; */
/* cu[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zero);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1.0/(dkx*dkx + dky2);
zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]);
cu[4*(j+kj)] -= dkx*zt1;
cu[1+4*(j+kj)] -= dky*zt1;
zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]);
cu[4*(j+k1)] -= dkx*zt1;
cu[1+4*(j+k1)] += dky*zt1;
cu[4*(j+kj+l1)] = zero;
cu[1+4*(j+kj+l1)] = zero;
cu[2+4*(j+kj+l1)] = zero;
cu[4*(j+k1+l1)] = zero;
cu[1+4*(j+k1+l1)] = zero;
cu[2+4*(j+k1+l1)] = zero;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kj = nxvh*k;
k1 = nxvh*ny - kj;
cu[1+4*kj] = zero;
cu[4*k1] = zero;
cu[1+4*k1] = zero;
cu[2+4*k1] = zero;
cu[4*(kj+l1)] = zero;
cu[1+4*(kj+l1)] = zero;
cu[2+4*(kj+l1)] = zero;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
v_zt2 = _mm512_load_ps((float *)&cu[4*j]);
/* zero out kx = 0 mode */
if (j==0) {
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(255),v_zero);
}
/* cu[4*j] = zero; */
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(771),v_zero);
_mm512_store_ps((float *)&cu[4*j],v_zt2);
/* cu[4*(j+k1)] = zero; */
/* cu[1+4*(j+k1)] = zero; */
/* cu[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1)],v_zero);
/* cu[4*(j+l1)] = zero; */
/* cu[1+4*(j+l1)] = zero; */
/* cu[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+l1)],v_zero);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
cu[4*j] = zero;
cu[4*(j+k1)] = zero;
cu[1+4*(j+k1)] = zero;
cu[2+4*(j+k1)] = zero;
cu[4*(j+l1)] = zero;
cu[1+4*(j+l1)] = zero;
cu[2+4*(j+l1)] = zero;
cu[4*(j+k1+l1)] = zero;
cu[1+4*(j+k1+l1)] = zero;
cu[2+4*(j+k1+l1)] = zero;
}
cu[0] = zero;
cu[1] = zero;
cu[2] = zero;
cu[4*k1] = zero;
cu[1+4*k1] = zero;
cu[2+4*k1] = zero;
cu[4*l1] = zero;
cu[1+4*l1] = zero;
cu[2+4*l1] = zero;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
return;
}
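/*--------------------------------------------------------------------*/
/* illustrative scalar sketch (not part of the library interface): the
   per-mode transverse projection cu = cu - k*(k.cu)/(k.k) applied by
   ckncmcuperp3 above, mirroring its scalar remainder loops.  the
   helper name and argument layout are assumptions made for this
   example only; dkx, dky, dkz are the precomputed wavenumbers of one
   interior fourier mode.  it is marked unused because nothing in the
   library calls it. */
__attribute__((unused))
static void kncm_cuperp3_mode_sketch(float complex cu[3], float dkx,
                                     float dky, float dkz) {
   float at1 = 1.0f/(dkx*dkx + dky*dky + dkz*dkz);
/* zt1 = (k . cu)/(k . k) */
   float complex zt1 = at1*(dkx*cu[0] + dky*cu[1] + dkz*cu[2]);
   cu[0] -= dkx*zt1;
   cu[1] -= dky*zt1;
   cu[2] -= dkz*zt1;
}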
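/*--------------------------------------------------------------------*/
/* illustrative scalar sketch (not part of the library interface): the
   per-mode magnetic field bxyz = ci*ci*sqrt(-1)*g*(k x cu) described
   in the ckncmibpois33 comments below, mirroring its scalar remainder
   loop.  the helper name and argument layout are assumptions made for
   this example only; g = crealf(ffc) for the mode and dkx, dky, dkz
   are its precomputed wavenumbers.  it is marked unused because
   nothing in the library calls it. */
__attribute__((unused))
static void kncm_bpois33_mode_sketch(const float complex cu[3],
                                     float complex bxyz[3], float g,
                                     float ci, float dkx, float dky,
                                     float dkz) {
   float at1 = ci*ci*g;
/* zt = sqrt(-1)*cu, written as -cimag(cu) + creal(cu)*i as in the loop */
   float complex ztx = -cimagf(cu[0]) + crealf(cu[0])*_Complex_I;
   float complex zty = -cimagf(cu[1]) + crealf(cu[1])*_Complex_I;
   float complex ztz = -cimagf(cu[2]) + crealf(cu[2])*_Complex_I;
   bxyz[0] = at1*(dky*ztz - dkz*zty);
   bxyz[1] = at1*(dkz*ztx - dkx*ztz);
   bxyz[2] = at1*(dkx*zty - dky*ztx);
}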
/*--------------------------------------------------------------------*/
void ckncmibpois33(float complex cu[], float complex bxyz[],
float complex ffc[], float ci, float *wm, int nx,
int ny, int nz, int nxvh, int nyv, int nzv, int nxhd,
int nyhd, int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
magnetic field with periodic boundary conditions.
input: cu,ffc,ci,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
output: bxyz, wm
approximate flop count is:
193*nxc*nyc*nzc + 84*(nxc*nyc + nxc*nzc + nyc*nzc)
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
the magnetic field is calculated using the equations:
bx[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]*
(ky*cuz[kz][ky][kx]-kz*cuy[kz][ky][kx]),
by[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]*
(kz*cux[kz][ky][kx]-kx*cuz[kz][ky][kx]),
bz[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]*
(kx*cuy[kz][ky][kx]-ky*cux[kz][ky][kx]),
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers,
g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx],
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for
bx(kx=pi) = by(kx=pi) = bz(kx=pi) = 0,
bx(ky=pi) = by(ky=pi) = bz(ky=pi) = 0,
bx(kz=pi) = by(kz=pi) = bz(kz=pi) = 0,
bx(kx=0,ky=0,kz=0) = by(kx=0,ky=0,kz=0) = bz(kx=0,ky=0,kz=0) = 0.
cu[l][k][j][i] = complex current density for fourier mode (j,k,l)
bxyz[l][k][j][i] = i component of complex magnetic field
all for fourier mode (j,k,l)
cimag(ffc[l][k][j]) = finite-size particle shape factor s
for fourier mode (j,k,l)
creal(ffc[l][k][j]) = potential green's function g
for fourier mode (j,k,l)
ci = reciprocal of velocity of light
magnetic field energy is also calculated, using
wm = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*ci*ci*
|cu[kz][ky][kx]*s[kz][ky][kx]|**2)
this expression is valid only if the current is divergence-free
nx/ny/nz = system length in x/y/z direction
nxvh = second dimension of field arrays, must be >= nxh
nyv = third dimension of field arrays, must be >= ny
nzv = fourth dimension of field arrays, must be >= nz
nxhd = first dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
nzhd = third dimension of form factor array, must be >= nzh
requires KNC, cu, bxyz, ffc need to be 64 byte aligned
nxhd needs to be a multiple of 8
nxvh needs to be a multiple of 2
cu, bxyz need to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dky, dkz, ci2, at1, at2, at3, at4;
float complex zero, zt1, zt2, zt3;
double wp, sum1, sum2;
__m512i v_j, v_it, v_n, v_m;
__m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_ci2;
__m512 v_dk1, v_dk2, v_at1, v_at2, v_at3, v_at4, v_zero;
__m512 v_zt1, v_zt2, v_zt3, v_zt4;
__m512d v_wp, v_d;
__attribute__((aligned(64))) double dd[8];
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
ci2 = ci*ci;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
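/* v_n, v_m = lane index patterns used to align the cross product terms
   when the curl components are assembled below */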
v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4);
v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_ci2 = _mm512_set1_ps(ci2);
/* calculate magnetic field and sum field energy */
sum1 = 0.0;
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,zt3, \
wp,v_it,v_dkx,v_dky,v_dkz,v_dk1,v_dk2,v_at1,v_at2,v_at3,v_at4,v_zt1, \
v_zt2,v_zt3,v_zt4,v_d,v_wp,dd) \
reduction(+:sum1)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
wp = 0.0;
v_wp = _mm512_set1_pd(0.0);
/* add kz to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),
v_dky);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),
v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j+kk+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
/* at3 = dky*at1; */
/* at4 = dkz*at1; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j+kk+ll]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*(j+kj+lj)]) */
            /*                + crealf(cu[2+4*(j+kj+lj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+kj+lj)]) */
/* + crealf(cu[1+4*(j+kj+lj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+kj+lj)]) */
/* + crealf(cu[4*(j+kj+lj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2; */
/* bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt1);
/* wp += at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)]) */
/* + cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)]) */
/* + cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)])); */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* zt1 = -cimagf(cu[2+4*(j+k1+lj)]) */
/* + crealf(cu[2+4*(j+k1+lj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+k1+lj)]) */
/* + crealf(cu[1+4*(j+k1+lj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+k1+lj)]) */
/* + crealf(cu[4*(j+k1+lj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336),
v_zero,v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771),
v_zero,v_at3);
/* bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2; */
/* bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt1);
/* wp += at1*(cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)]) */
/* + cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)]) */
/* + cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* zt1 = -cimagf(cu[2+4*(j+kj+l1)]) */
/* + crealf(cu[2+4*(j+kj+l1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+kj+l1)]) */
/* + crealf(cu[1+4*(j+kj+l1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+kj+l1)]) */
/* + crealf(cu[4*(j+kj+l1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771),
v_zero,v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084),
v_zero,v_at3);
/* bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2; */
/* bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt1);
/* wp += at1*(cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)]) */
/* + cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)]) */
/* + cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* zt1 = -cimagf(cu[2+4*(j+k1+l1)]) */
/* + crealf(cu[2+4*(j+k1+l1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+k1+l1)]) */
/* + crealf(cu[1+4*(j+k1+l1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+k1+l1)]) */
/* + crealf(cu[4*(j+k1+l1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(13107),
v_zero,v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3855),
v_zero,v_at3);
/* bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2; */
/* bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt1);
/* wp += at1*(cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)]) */
/* + cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)]) */
/* + cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j+kk+ll]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at4 = dkz*at1;
at1 = at1*cimagf(ffc[j+kk+ll]);
zt1 = -cimagf(cu[2+4*(j+kj+lj)])
+ crealf(cu[2+4*(j+kj+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+kj+lj)])
+ crealf(cu[1+4*(j+kj+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+kj+lj)])
+ crealf(cu[4*(j+kj+lj)])*_Complex_I;
bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2;
bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1;
bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3;
zt1 = -cimagf(cu[2+4*(j+k1+lj)])
+ crealf(cu[2+4*(j+k1+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+k1+lj)])
+ crealf(cu[1+4*(j+k1+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+k1+lj)])
+ crealf(cu[4*(j+k1+lj)])*_Complex_I;
bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2;
bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1;
bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3;
zt1 = -cimagf(cu[2+4*(j+kj+l1)])
+ crealf(cu[2+4*(j+kj+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+kj+l1)])
+ crealf(cu[1+4*(j+kj+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+kj+l1)])
+ crealf(cu[4*(j+kj+l1)])*_Complex_I;
bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2;
bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1;
bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3;
zt1 = -cimagf(cu[2+4*(j+k1+l1)])
+ crealf(cu[2+4*(j+k1+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+k1+l1)])
+ crealf(cu[1+4*(j+k1+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+k1+l1)])
+ crealf(cu[4*(j+k1+l1)])*_Complex_I;
bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2;
bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1;
bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3;
at1 = at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)])
+ cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)])
+ cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)])
+ cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)])
+ cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)])
+ cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)])
+ cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)])
+ cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)])
+ cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)])
+ cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)])
+ cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)])
+ cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)]));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = ci2*crealf(ffc[kk+ll]);
at3 = at1*dny*(float) k;
at4 = dkz*at1;
at1 = at1*cimagf(ffc[kk+ll]);
zt1 = -cimagf(cu[2+4*(kj+lj)])
+ crealf(cu[2+4*(kj+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(kj+lj)])
+ crealf(cu[1+4*(kj+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(kj+lj)])
+ crealf(cu[4*(kj+lj)])*_Complex_I;
bxyz[4*(kj+lj)] = at3*zt1 - at4*zt2;
bxyz[1+4*(kj+lj)] = at4*zt3;
bxyz[2+4*(kj+lj)] = -at3*zt3;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
zt1 = -cimagf(cu[2+4*(kj+l1)])
+ crealf(cu[2+4*(kj+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(kj+l1)])
+ crealf(cu[1+4*(kj+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(kj+l1)])
+ crealf(cu[4*(kj+l1)])*_Complex_I;
bxyz[4*(kj+l1)] = at3*zt1 + at4*zt2;
bxyz[1+4*(kj+l1)] = -at4*zt3;
bxyz[2+4*(kj+l1)] = -at3*zt3;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
at1 = at1*(cu[4*(kj+lj)]*conjf(cu[4*(kj+lj)])
+ cu[1+4*(kj+lj)]*conjf(cu[1+4*(kj+lj)])
+ cu[2+4*(kj+lj)]*conjf(cu[2+4*(kj+lj)])
+ cu[4*(kj+l1)]*conjf(cu[4*(kj+l1)])
+ cu[1+4*(kj+l1)]*conjf(cu[1+4*(kj+l1)])
+ cu[2+4*(kj+l1)]*conjf(cu[2+4*(kj+l1)]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
      /* zero out ky in curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15),
(float *)&ffc[j+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
/* at4 = dkz*at1; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j+ll]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*(j+lj)]) */
/* + crealf(cu[2+4*(j+lj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+lj)]) */
/* + crealf(cu[1+4*(j+lj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+lj)]) */
/* + crealf(cu[4*(j+lj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*(j+lj)] = -at4*zt2; */
/* bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+lj)] = at2*zt2; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt1);
/* wp += at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)]) */
/* + cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)]) */
/* + cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)]) */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* bxyz[4*(j+k1+lj)] = zero; */
/* bxyz[1+4*(j+k1+lj)] = zero; */
/* bxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero);
/* zt1 = -cimagf(cu[2+4*(j+l1)]) */
/* + crealf(cu[2+4*(j+l1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+l1)]) */
/* + crealf(cu[1+4*(j+l1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+l1)]) */
/* + crealf(cu[4*(j+l1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771),v_zero,
v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084),v_zero,
v_at3);
/* bxyz[4*(j+l1)] = at4*zt2; */
/* bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+l1)] = at2*zt2; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt1);
/* wp += at1*(cu[4*(j+l1)]*conjf(cu[4*(j+l1)]) */
/* + cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)]) */
/* + cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j+ll]);
at2 = at1*dnx*(float) j;
at4 = dkz*at1;
at1 = at1*cimagf(ffc[j+ll]);
zt1 = -cimagf(cu[2+4*(j+lj)])
+ crealf(cu[2+4*(j+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+lj)])
+ crealf(cu[1+4*(j+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+lj)])
+ crealf(cu[4*(j+lj)])*_Complex_I;
bxyz[4*(j+lj)] = -at4*zt2;
bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1;
bxyz[2+4*(j+lj)] = at2*zt2;
bxyz[4*(j+k1+lj)] = zero;
bxyz[1+4*(j+k1+lj)] = zero;
bxyz[2+4*(j+k1+lj)] = zero;
zt1 = -cimagf(cu[2+4*(j+l1)])
+ crealf(cu[2+4*(j+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+l1)])
+ crealf(cu[1+4*(j+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+l1)])
+ crealf(cu[4*(j+l1)])*_Complex_I;
bxyz[4*(j+l1)] = at4*zt2;
bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1;
bxyz[2+4*(j+l1)] = at2*zt2;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)])
+ cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)])
+ cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)])
+ cu[4*(j+l1)]*conjf(cu[4*(j+l1)])
+ cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)])
+ cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)]));
wp += (double) at1;
}
/* mode numbers kx = 0, nx/2 */
at1 = ci2*crealf(ffc[ll]);
at4 = dkz*at1;
at1 = at1*cimagf(ffc[ll]);
zt2 = -cimagf(cu[1+4*(lj)]) + crealf(cu[1+4*(lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(lj)]) + crealf(cu[4*(lj)])*_Complex_I;
bxyz[4*lj] = -at4*zt2;
bxyz[1+4*lj] = at4*zt3;
bxyz[2+4*lj] = zero;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
at1 = at1*(cu[4*lj]*conjf(cu[4*lj])
+ cu[1+4*lj]*conjf(cu[1+4*lj])
+ cu[2+4*lj]*conjf(cu[2+4*lj]));
wp += (double) at1;
/* sum1 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum1 += (wp + dd[0]);
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
sum2 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,zt3,wp,v_it,v_dkx,v_dky, \
v_dk1,v_dk2,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,v_d,v_wp, \
dd) \
reduction(+:sum2)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
wp = 0.0;
v_wp = _mm512_set1_pd(0.0);
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j+kk]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j+kk]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15),
(float *)&ffc[j+kk+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
/* at3 = dky*at1; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j+kk]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*(j+kj)]) */
/* + crealf(cu[2+4*(j+kj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+kj)]) */
/* + crealf(cu[1+4*(j+kj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+kj)]) */
/* + crealf(cu[4*(j+kj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*(j+kj)] = at3*zt1; */
/* bxyz[1+4*(j+kj)] = -at2*zt1; */
/* bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt1);
/* wp += at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)]) */
/* + cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)]) */
/* + cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)])); */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* zt1 = -cimagf(cu[2+4*(j+k1)]) */
/* + crealf(cu[2+4*(j+k1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+k1)]) */
/* + crealf(cu[1+4*(j+k1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+k1)]) */
/* + crealf(cu[4*(j+k1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336),v_zero,
v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771),v_zero,
v_at3);
/* bxyz[4*(j+k1)] = -at3*zt1; */
/* bxyz[1+4*(j+k1)] = -at2*zt1; */
/* bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt1);
/* wp += at1*(cu[4*(j+k1)]*conjf(cu[4*(j+k1)]) */
/* + cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)]) */
/* + cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+kj+l1)] = zero; */
/* bxyz[1+4*(j+kj+l1)] = zero; */
/* bxyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at1 = at1*cimagf(ffc[j+kk]);
zt1 = -cimagf(cu[2+4*(j+kj)])
+ crealf(cu[2+4*(j+kj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+kj)])
+ crealf(cu[1+4*(j+kj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+kj)])
+ crealf(cu[4*(j+kj)])*_Complex_I;
bxyz[4*(j+kj)] = at3*zt1;
bxyz[1+4*(j+kj)] = -at2*zt1;
bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3;
zt1 = -cimagf(cu[2+4*(j+k1)])
+ crealf(cu[2+4*(j+k1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+k1)])
+ crealf(cu[1+4*(j+k1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+k1)])
+ crealf(cu[4*(j+k1)])*_Complex_I;
bxyz[4*(j+k1)] = -at3*zt1;
bxyz[1+4*(j+k1)] = -at2*zt1;
bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3;
bxyz[4*(j+kj+l1)] = zero;
bxyz[1+4*(j+kj+l1)] = zero;
bxyz[2+4*(j+kj+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)])
+ cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)])
+ cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)])
+ cu[4*(j+k1)]*conjf(cu[4*(j+k1)])
+ cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)])
+ cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)]));
wp += (double) at1;
}
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
}
/* mode numbers kx = 0, nx/2 */
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = ci2*crealf(ffc[kk]);
at3 = at1*dny*(float) k;
at1 = at1*cimagf(ffc[kk]);
zt1 = -cimagf(cu[2+4*(kj)]) + crealf(cu[2+4*(kj)])*_Complex_I;
zt3 = -cimagf(cu[4*(kj)]) + crealf(cu[4*(kj)])*_Complex_I;
bxyz[4*kj] = at3*zt1;
bxyz[1+4*kj] = zero;
bxyz[2+4*kj] = -at3*zt3;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
bxyz[4*(kj+l1)] = zero;
bxyz[1+4*(kj+l1)] = zero;
bxyz[2+4*(kj+l1)] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
at1 = at1*(cu[4*kj]*conjf(cu[4*kj])
+ cu[1+4*kj]*conjf(cu[1+4*kj])
+ cu[2+4*kj]*conjf(cu[2+4*kj]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15),
(float *)&ffc[j+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*j]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*j] = zero; */
/* bxyz[1+4*j] = -at2*zt1; */
/* bxyz[2+4*j] = at2*zt2; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),v_zero);
}
_mm512_store_ps((float *)&bxyz[4*j],v_zt1);
/* wp += at1*(cu[4*j]*conjf(cu[4*j]) */
/* + cu[1+4*j]*conjf(cu[1+4*j]) */
/* + cu[2+4*j]*conjf(cu[2+4*j])); */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1)] = zero; */
/* bxyz[1+4*(j+k1)] = zero; */
/* bxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero);
/* bxyz[4*(j+l1)] = zero; */
/* bxyz[1+4*(j+l1)] = zero; */
/* bxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j]);
at2 = at1*dnx*(float) j;
at1 = at1*cimagf(ffc[j]);
zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I;
zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I;
bxyz[4*j] = zero;
bxyz[1+4*j] = -at2*zt1;
bxyz[2+4*j] = at2*zt2;
bxyz[4*(j+k1)] = zero;
bxyz[1+4*(j+k1)] = zero;
bxyz[2+4*(j+k1)] = zero;
bxyz[4*(j+l1)] = zero;
bxyz[1+4*(j+l1)] = zero;
bxyz[2+4*(j+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(cu[4*j]*conjf(cu[4*j])
+ cu[1+4*j]*conjf(cu[1+4*j])
+ cu[2+4*j]*conjf(cu[2+4*j]));
wp += (double) at1;
}
bxyz[0] = zero;
bxyz[1] = zero;
bxyz[2] = zero;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
/* *wm = wp*((float) nx)*((float) ny)*((float) nz); */
*wm = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz);
return;
}
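/*--------------------------------------------------------------------*/
/* reference sketch (not part of the original API): the per-mode      */
/* leapfrog update documented in ckncmmaxwel3 below, written out in   */
/* plain scalar C for a single fourier mode (kx,ky,kz).  the function */
/* name and argument layout are illustrative assumptions only; the    */
/* KNC routine performs the same arithmetic on two packed modes per   */
/* 512-bit register and also accumulates the field energies.          */
/* assumes <complex.h> is included at the top of this file, as it is  */
/* for the float complex arithmetic used throughout.                  */
static void maxwell3_mode_sketch(float complex e[3], float complex b[3],
                                 const float complex cu[3], float kx,
                                 float ky, float kz, float dt, float ci,
                                 float affp, float s) {
   float c2 = 1.0f/(ci*ci);
   float dth = 0.5f*dt;
   float afdt = affp*dt*s;
   float complex ex, ey, ez, bx, by, bz;
/* first half step of the magnetic field: b -= 0.5*dt*i*(k x e) */
   bx = b[0] - dth*_Complex_I*(ky*e[2] - kz*e[1]);
   by = b[1] - dth*_Complex_I*(kz*e[0] - kx*e[2]);
   bz = b[2] - dth*_Complex_I*(kx*e[1] - ky*e[0]);
/* whole step of the electric field:                            */
/* e += c2*dt*i*(k x b) - affp*dt*s*cu                          */
   ex = e[0] + c2*dt*_Complex_I*(ky*bz - kz*by) - afdt*cu[0];
   ey = e[1] + c2*dt*_Complex_I*(kz*bx - kx*bz) - afdt*cu[1];
   ez = e[2] + c2*dt*_Complex_I*(kx*by - ky*bx) - afdt*cu[2];
/* remaining half step of the magnetic field with the new e     */
   bx -= dth*_Complex_I*(ky*ez - kz*ey);
   by -= dth*_Complex_I*(kz*ex - kx*ez);
   bz -= dth*_Complex_I*(kx*ey - ky*ex);
   e[0] = ex; e[1] = ey; e[2] = ez;
   b[0] = bx; b[1] = by; b[2] = bz;
   return;
}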
/*--------------------------------------------------------------------*/
void ckncmmaxwel3(float complex exyz[], float complex bxyz[],
float complex cu[], float complex ffc[], float ci,
float dt, float *wf, float *wm, int nx, int ny,
int nz, int nxvh, int nyv, int nzv, int nxhd,
int nyhd, int nzhd) {
/* this subroutine solves 3d maxwell's equations in fourier space for
transverse electric and magnetic fields with periodic boundary
conditions.
input: all, output: wf, wm, exyz, bxyz
approximate flop count is:
680*nxc*nyc*nzc + 149*(nxc*nyc + nxc*nzc + nyc*nzc)
plus nxc*nyc*nzc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
the magnetic field is first updated half a step using the equations:
bx[kz][ky][kx] = bx[kz][ky][kx] - .5*dt*sqrt(-1)*
(ky*ez[kz][ky][kx]-kz*ey[kz][ky][kx])
by[kz][ky][kx] = by[kz][ky][kx] - .5*dt*sqrt(-1)*
(kz*ex[kz][ky][kx]-kx*ez[kz][ky][kx])
bz[kz][ky][kx] = bz[kz][ky][kx] - .5*dt*sqrt(-1)*
(kx*ey[kz][ky][kx]-ky*ex[kz][ky][kx])
the electric field is then updated a whole step using the equations:
ex[kz][ky][kx] = ex[kz][ky][kx] + c2*dt*sqrt(-1)
*(ky*bz[kz][ky][kx]-kz*by[kz][ky][kx])
- affp*dt*cux[kz][ky][kx]*s[kz][ky][kx]
   ey[kz][ky][kx] = ey[kz][ky][kx] + c2*dt*sqrt(-1)
*(kz*bx[kz][ky][kx]-kx*bz[kz][ky][kx])
- affp*dt*cuy[kz][ky][kx]*s[kz][ky][kx]
ez[kz][ky][kx] = ez[kz][ky][kx] + c2*dt*sqrt(-1)
*(kx*by[kz][ky][kx]-ky*bx[kz][ky][kx])
- affp*dt*cuz[kz][ky][kx]*s[kz][ky][kx]
   the magnetic field is finally updated the remaining half step with
   the new electric field and the previous magnetic field, using the
   same equations.
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, c2 = 1./(ci*ci)
   and s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2)
j,k,l = fourier mode numbers, except for
ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
   ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
ex(kz=pi) = ey(kz=pi) = ez(kz=pi) = 0,
ex(kx=0,ky=0,kz=0) = ey(kx=0,ky=0,kz=0) = ez(kx=0,ky=0,kz=0) = 0.
and similarly for bx, by, bz.
cu[l][k][j][i] = complex current density
exyz[l][k][j][i] = complex transverse electric field
bxyz[l][k][j][i] = complex magnetic field
for component i, all for fourier mode (j1,k,l)
real(ffc[0][0][0]) = affp = normalization constant = nx*ny*nz/np,
where np=number of particles
aimag(ffc[l][k][j]) = finite-size particle shape factor s,
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2)
for fourier mode (j,k,l)
ci = reciprocal of velocity of light
dt = time interval between successive calculations
transverse electric field energy is also calculated, using
   wf = nx*ny*nz*sum((1/affp)*|exyz[kz][ky][kx]|**2)
magnetic field energy is also calculated, using
   wm = nx*ny*nz*sum((c2/affp)*|bxyz[kz][ky][kx]|**2)
nx/ny/nz = system length in x/y/z direction
nxvh = second dimension of field arrays, must be >= nxh
nyv = third dimension of field arrays, must be >= ny
nzv = fourth dimension of field arrays, must be >= nz
nxhd = second dimension of form factor array, must be >= nxh
nyhd = third dimension of form factor array, must be >= nyh
nzhd = fourth dimension of form factor array, must be >= nzh
requires KNC, cu, exyz, bxyz, ffc need to be 64 byte aligned
nxhd needs to be a multiple of 8
nxvh needs to be a multiple of 2
   cu, exyz, bxyz need to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dth, c2, cdt, affp, anorm, dkx, dky, dkz;
float adt, afdt;
float at1;
float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9;
double wp, ws, sum1, sum2, sum3, sum4;
__m512i v_j, v_it, v_n, v_m;
__m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz;
__m512 v_zero, v_cdt, v_adt, v_afdt, v_dth, v_anorm;
__m512 v_dk1, v_dk2, v_at2, v_at3;
__m512 v_zt1, v_zt2, v_zt3, v_zt4, v_zt5, v_zt6, v_zt7;
__m512d v_wp, v_ws, v_d;
__attribute__((aligned(64))) double dd[8];
if (ci <= 0.0)
return;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
dth = 0.5*dt;
c2 = 1.0/(ci*ci);
cdt = c2*dt;
affp = creal(ffc[0]);
adt = affp*dt;
zero = 0.0 + 0.0*_Complex_I;
anorm = 1.0/affp;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4);
v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2);
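   /* register layout: each 512-bit vector holds two fourier modes of  */
   /* 8 floats each, re/im pairs for the x, y, z components plus an    */
   /* unused pad component (lanes 6-7 and 14-15); mask 16191 = 0x3f3f  */
   /* selects only the x, y, z lanes.  v_j supplies the offsets (0,1)  */
   /* of the two packed modes, and v_n, v_m are permutation tables for */
   /* the cross product: after multiplying by the curl operators v_dk1 */
   /* and v_dk2, permuting with v_n and v_m and subtracting yields     */
   /* (ky*fz-kz*fy, kz*fx-kx*fz, kx*fy-ky*fx) = k x f for both modes   */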
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_cdt = _mm512_set1_ps(cdt);
v_adt = _mm512_set1_ps(adt);
v_dth = _mm512_set1_ps(dth);
v_anorm = _mm512_set1_ps(anorm);
/* update electromagnetic field and sum field energies */
sum1 = 0.0;
sum2 = 0.0;
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,ll,lj,kk,kj,dkz,dky,dkx,afdt,at1,zt1,zt2,zt3,zt4, \
zt5,zt6,zt7,zt8,zt9,ws,wp,v_it,v_dkx,v_dky,v_dkz,v_dk1,v_dk2,v_afdt, \
v_at2,v_at3,v_zt1,v_zt2,v_zt3,v_zt4,v_zt5,v_zt6,v_zt7,v_d,v_ws,v_wp,dd) \
reduction(+:sum1,sum2)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
ws = 0.0;
wp = 0.0;
v_ws = _mm512_set1_pd(0.0);
v_wp = _mm512_set1_pd(0.0);
/* add kz to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),
v_dky);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),
v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* afdt = adt*cimagf(ffc[j+kk+ll]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step, ky > 0, kz > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+kj+lj)]) */
/* + crealf(exyz[2+4*(j+kj+lj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+kj+lj)]) */
/* + crealf(exyz[1+4*(j+kj+lj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+kj+lj)]) */
/* + crealf(exyz[4*(j+kj+lj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); */
/* zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); */
/* zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+lj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) */
/* - afdt*cu[4*(j+kj+lj)]; */
/* zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) */
/* - afdt*cu[1+4*(j+kj+lj)]; */
/* zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) */
/* - afdt*cu[2+4*(j+kj+lj)]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+kj+lj)] = zt7; */
/* exyz[1+4*(j+kj+lj)] = zt8; */
/* exyz[2+4*(j+kj+lj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+kj+lj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+kj+lj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt4 -= dth*(dky*zt1 - dkz*zt2); */
/* zt5 -= dth*(dkz*zt3 - dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+kj+lj)] = zt4; */
/* bxyz[1+4*(j+kj+lj)] = zt5; */
/* bxyz[2+4*(j+kj+lj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj+lj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* update magnetic field half time step, ky < 0, kz > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+k1+lj)]) */
/* + crealf(exyz[2+4*(j+k1+lj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+k1+lj)]) */
/* + crealf(exyz[1+4*(j+k1+lj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+k1+lj)]) */
/* + crealf(exyz[4*(j+k1+lj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336),
v_zero,v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771),
v_zero,v_dk2);
/* zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); */
/* zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); */
/* zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+lj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) */
/* - afdt*cu[4*(j+k1+lj)]; */
/* zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) */
/* - afdt*cu[1+4*(j+k1+lj)]; */
/* zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) */
/* - afdt*cu[2+4*(j+k1+lj)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+k1+lj)] = zt7; */
/* exyz[1+4*(j+k1+lj)] = zt8; */
/* exyz[2+4*(j+k1+lj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+k1+lj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 += dth*(dky*zt1 + dkz*zt2); */
/* zt5 -= dth*(dkz*zt3 - dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+k1+lj)] = zt4; */
/* bxyz[1+4*(j+k1+lj)] = zt5; */
/* bxyz[2+4*(j+k1+lj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+k1+lj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* update magnetic field half time step, ky > 0, kz < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+kj+l1)]) */
/* + crealf(exyz[2+4*(j+kj+l1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+kj+l1)]) */
/* + crealf(exyz[1+4*(j+kj+l1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+kj+l1)]) */
/* + crealf(exyz[4*(j+kj+l1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771),
v_zero,v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084),
v_zero,v_dk2);
/* zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); */
/* zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1); */
/* zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+l1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) */
/* - afdt*cu[4*(j+kj+l1)]; */
/* zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) */
/* - afdt*cu[1+4*(j+kj+l1)]; */
/* zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) */
/* - afdt*cu[2+4*(j+kj+l1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+kj+l1)] = zt7; */
/* exyz[1+4*(j+kj+l1)] = zt8; */
/* exyz[2+4*(j+kj+l1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+kj+l1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 -= dth*(dky*zt1 + dkz*zt2); */
/* zt5 += dth*(dkz*zt3 + dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+kj+l1)] = zt4; */
/* bxyz[1+4*(j+kj+l1)] = zt5; */
/* bxyz[2+4*(j+kj+l1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj+l1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* update magnetic field half time step, ky < 0, kz < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+k1+l1)]) */
/* + crealf(exyz[2+4*(j+k1+l1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+k1+l1)]) */
/* + crealf(exyz[1+4*(j+k1+l1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+k1+l1)]) */
/* + crealf(exyz[4*(j+k1+l1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(13107),
v_zero,v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3855),
v_zero,v_dk2);
/* zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); */
/* zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); */
/* zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+l1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) */
/* - afdt*cu[4*(j+k1+l1)]; */
/* zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) */
/* - afdt*cu[1+4*(j+k1+l1)]; */
/* zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3) */
/* - afdt*cu[2+4*(j+k1+l1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+k1+l1)] = zt7; */
/* exyz[1+4*(j+k1+l1)] = zt8; */
/* exyz[2+4*(j+k1+l1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+k1+l1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 += dth*(dky*zt1 - dkz*zt2); */
/* zt5 += dth*(dkz*zt3 + dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+k1+l1)] = zt4; */
/* bxyz[1+4*(j+k1+l1)] = zt5; */
/* bxyz[2+4*(j+k1+l1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+k1+l1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j+kk+ll]);
/* update magnetic field half time step, ky > 0, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+kj+lj)])
+ crealf(exyz[2+4*(j+kj+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+kj+lj)])
+ crealf(exyz[1+4*(j+kj+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+kj+lj)])
+ crealf(exyz[4*(j+kj+lj)])*_Complex_I;
zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2);
zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1);
zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2)
- afdt*cu[4*(j+kj+lj)];
zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1)
- afdt*cu[1+4*(j+kj+lj)];
zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3)
- afdt*cu[2+4*(j+kj+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+kj+lj)] = zt7;
exyz[1+4*(j+kj+lj)] = zt8;
exyz[2+4*(j+kj+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 - dkz*zt2);
zt5 -= dth*(dkz*zt3 - dkx*zt1);
zt6 -= dth*(dkx*zt2 - dky*zt3);
bxyz[4*(j+kj+lj)] = zt4;
bxyz[1+4*(j+kj+lj)] = zt5;
bxyz[2+4*(j+kj+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky < 0, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+k1+lj)])
+ crealf(exyz[2+4*(j+k1+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+k1+lj)])
+ crealf(exyz[1+4*(j+k1+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+k1+lj)])
+ crealf(exyz[4*(j+k1+lj)])*_Complex_I;
zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2);
zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1);
zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2)
- afdt*cu[4*(j+k1+lj)];
zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1)
- afdt*cu[1+4*(j+k1+lj)];
zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3)
- afdt*cu[2+4*(j+k1+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+k1+lj)] = zt7;
exyz[1+4*(j+k1+lj)] = zt8;
exyz[2+4*(j+k1+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dky*zt1 + dkz*zt2);
zt5 -= dth*(dkz*zt3 - dkx*zt1);
zt6 -= dth*(dkx*zt2 + dky*zt3);
bxyz[4*(j+k1+lj)] = zt4;
bxyz[1+4*(j+k1+lj)] = zt5;
bxyz[2+4*(j+k1+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky > 0, kz < 0 */
zt1 = -cimagf(exyz[2+4*(j+kj+l1)])
+ crealf(exyz[2+4*(j+kj+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+kj+l1)])
+ crealf(exyz[1+4*(j+kj+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+kj+l1)])
+ crealf(exyz[4*(j+kj+l1)])*_Complex_I;
zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2);
zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1);
zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2)
- afdt*cu[4*(j+kj+l1)];
zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1)
- afdt*cu[1+4*(j+kj+l1)];
zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3)
- afdt*cu[2+4*(j+kj+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+kj+l1)] = zt7;
exyz[1+4*(j+kj+l1)] = zt8;
exyz[2+4*(j+kj+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 + dkz*zt2);
zt5 += dth*(dkz*zt3 + dkx*zt1);
zt6 -= dth*(dkx*zt2 - dky*zt3);
bxyz[4*(j+kj+l1)] = zt4;
bxyz[1+4*(j+kj+l1)] = zt5;
bxyz[2+4*(j+kj+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky < 0, kz < 0 */
zt1 = -cimagf(exyz[2+4*(j+k1+l1)])
+ crealf(exyz[2+4*(j+k1+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+k1+l1)])
+ crealf(exyz[1+4*(j+k1+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+k1+l1)])
+ crealf(exyz[4*(j+k1+l1)])*_Complex_I;
zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2);
zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1);
zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2)
- afdt*cu[4*(j+k1+l1)];
zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1)
- afdt*cu[1+4*(j+k1+l1)];
zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3)
- afdt*cu[2+4*(j+k1+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+k1+l1)] = zt7;
exyz[1+4*(j+k1+l1)] = zt8;
exyz[2+4*(j+k1+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dky*zt1 - dkz*zt2);
zt5 += dth*(dkz*zt3 + dkx*zt1);
zt6 -= dth*(dkx*zt2 + dky*zt3);
bxyz[4*(j+k1+l1)] = zt4;
bxyz[1+4*(j+k1+l1)] = zt5;
bxyz[2+4*(j+k1+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
afdt = adt*cimagf(ffc[kk+ll]);
/* update magnetic field half time step, kz > 0 */
zt1 = -cimagf(exyz[2+4*(kj+lj)])
+ crealf(exyz[2+4*(kj+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(kj+lj)])
+ crealf(exyz[1+4*(kj+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(kj+lj)])
+ crealf(exyz[4*(kj+lj)])*_Complex_I;
zt4 = bxyz[4*(kj+lj)] - dth*(dky*zt1 - dkz*zt2);
zt5 = bxyz[1+4*(kj+lj)] - dth*(dkz*zt3);
zt6 = bxyz[2+4*(kj+lj)] + dth*(dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(kj+lj)] + cdt*(dky*zt1 - dkz*zt2)
- afdt*cu[4*(kj+lj)];
zt8 = exyz[1+4*(kj+lj)] + cdt*(dkz*zt3) - afdt*cu[1+4*(kj+lj)];
zt9 = exyz[2+4*(kj+lj)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(kj+lj)] = zt7;
exyz[1+4*(kj+lj)] = zt8;
exyz[2+4*(kj+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 - dkz*zt2);
zt5 -= dth*(dkz*zt3);
zt6 += dth*(dky*zt3);
bxyz[4*(kj+lj)] = zt4;
bxyz[1+4*(kj+lj)] = zt5;
bxyz[2+4*(kj+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
exyz[4*(k1+lj)] = zero;
exyz[1+4*(k1+lj)] = zero;
exyz[2+4*(k1+lj)] = zero;
/* update magnetic field half time step, kz < 0 */
zt1 = -cimagf(exyz[2+4*(kj+l1)])
+ crealf(exyz[2+4*(kj+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(kj+l1)])
+ crealf(exyz[1+4*(kj+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(kj+l1)])
+ crealf(exyz[4*(kj+l1)])*_Complex_I;
zt4 = bxyz[4*(kj+l1)] - dth*(dky*zt1 + dkz*zt2);
zt5 = bxyz[1+4*(kj+l1)] + dth*(dkz*zt3);
zt6 = bxyz[2+4*(kj+l1)] + dth*(dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(kj+l1)] + cdt*(dky*zt1 + dkz*zt2)
- afdt*cu[4*(kj+l1)];
zt8 = exyz[1+4*(kj+l1)] - cdt*(dkz*zt3) - afdt*cu[1+4*(kj+l1)];
zt9 = exyz[2+4*(kj+l1)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(kj+l1)] = zt7;
exyz[1+4*(kj+l1)] = zt8;
exyz[2+4*(kj+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 + dkz*zt2);
zt5 += dth*(dkz*zt3);
zt6 += dth*(dky*zt3);
bxyz[4*(kj+l1)] = zt4;
bxyz[1+4*(kj+l1)] = zt5;
bxyz[2+4*(kj+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
      /* zero out ky in curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* afdt = adt*cimagf(ffc[j+ll]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+ll]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+ll+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step, kz > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+lj)]) */
/* + crealf(exyz[2+4*(j+lj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+lj)]) */
/* + crealf(exyz[1+4*(j+lj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+lj)]) */
/* + crealf(exyz[4*(j+lj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2); */
/* zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); */
/* zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+lj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)]; */
/* zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) */
/* - afdt*cu[1+4*(j+lj)]; */
/* zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+lj)] = zt7; */
/* exyz[1+4*(j+lj)] = zt8; */
/* exyz[2+4*(j+lj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+lj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+lj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt4 += dth*(dkz*zt2); */
/* zt5 -= dth*(dkz*zt3 - dkx*zt1); */
/* zt6 -= dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+lj)] = zt4; */
/* bxyz[1+4*(j+lj)] = zt5; */
/* bxyz[2+4*(j+lj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+lj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* bxyz[4*(j+k1+lj)] = zero; */
/* bxyz[1+4*(j+k1+lj)] = zero; */
/* bxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero);
/* exyz[4*(j+k1+lj)] = zero; */
/* exyz[1+4*(j+k1+lj)] = zero; */
/* exyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zero);
         /* update magnetic field half time step, kz < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+l1)]) */
/* + crealf(exyz[2+4*(j+l1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+l1)]) */
/* + crealf(exyz[1+4*(j+l1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+l1)]) */
/* + crealf(exyz[4*(j+l1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771),v_zero,
v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084),v_zero,
v_dk2);
/* zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2); */
/* zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); */
/* zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+l1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)]; */
/* zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) */
/* - afdt*cu[1+4*(j+l1)]; */
/* zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+l1)] = zt7; */
/* exyz[1+4*(j+l1)] = zt8; */
/* exyz[2+4*(j+l1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+l1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+l1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 -= dth*(dkz*zt2); */
/* zt5 += dth*(dkz*zt3 + dkx*zt1); */
/* zt6 -= dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+l1)] = zt4; */
/* bxyz[1+4*(j+l1)] = zt5; */
/* bxyz[2+4*(j+l1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+l1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* convert to double precision before accumulating */
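         /* (_mm512_cvtpslo_pd converts the low 8 floats to doubles; the */
         /* permute4f128 with pattern 78 swaps the 256-bit halves so the */
         /* high 8 floats can be converted and accumulated as well) */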
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
/* exyz[4*(j+k1+l1)] = zero; */
/* exyz[1+4*(j+k1+l1)] = zero; */
/* exyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j+ll]);
/* update magnetic field half time step, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+lj)])
+ crealf(exyz[2+4*(j+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+lj)])
+ crealf(exyz[1+4*(j+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+lj)])
+ crealf(exyz[4*(j+lj)])*_Complex_I;
zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2);
zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1);
zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)];
zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1)
- afdt*cu[1+4*(j+lj)];
zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+lj)] = zt7;
exyz[1+4*(j+lj)] = zt8;
exyz[2+4*(j+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dkz*zt2);
zt5 -= dth*(dkz*zt3 - dkx*zt1);
zt6 -= dth*(dkx*zt2);
bxyz[4*(j+lj)] = zt4;
bxyz[1+4*(j+lj)] = zt5;
bxyz[2+4*(j+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+k1+lj)] = zero;
bxyz[1+4*(j+k1+lj)] = zero;
bxyz[2+4*(j+k1+lj)] = zero;
exyz[4*(j+k1+lj)] = zero;
exyz[1+4*(j+k1+lj)] = zero;
exyz[2+4*(j+k1+lj)] = zero;
/* update magnetic field half time step, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+l1)])
+ crealf(exyz[2+4*(j+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+l1)])
+ crealf(exyz[1+4*(j+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+l1)])
+ crealf(exyz[4*(j+l1)])*_Complex_I;
zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2);
zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1);
zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)];
zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1)
- afdt*cu[1+4*(j+l1)];
zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+l1)] = zt7;
exyz[1+4*(j+l1)] = zt8;
exyz[2+4*(j+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dkz*zt2);
zt5 += dth*(dkz*zt3 + dkx*zt1);
zt6 -= dth*(dkx*zt2);
bxyz[4*(j+l1)] = zt4;
bxyz[1+4*(j+l1)] = zt5;
bxyz[2+4*(j+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
exyz[4*(j+k1+l1)] = zero;
exyz[1+4*(j+k1+l1)] = zero;
exyz[2+4*(j+k1+l1)] = zero;
}
/* mode numbers kx = 0, nx/2 */
afdt = adt*cimagf(ffc[ll]);
/* update magnetic field half time step */
zt2 = -cimagf(exyz[1+4*(lj)]) + crealf(exyz[1+4*(lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(lj)]) + crealf(exyz[4*(lj)])*_Complex_I;
zt4 = bxyz[4*lj] + dth*(dkz*zt2);
zt5 = bxyz[1+4*lj] - dth*(dkz*zt3);
/* update electric field whole time step */
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*lj] - cdt*(dkz*zt2) - afdt*cu[4*lj];
zt8 = exyz[1+4*lj] + cdt*(dkz*zt3) - afdt*cu[1+4*lj];
/* update magnetic field half time step and store electric field */
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*lj] = zt7;
exyz[1+4*lj] = zt8;
exyz[2+4*lj] = zero;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8));
ws += (double) at1;
zt4 += dth*(dkz*zt2);
zt5 -= dth*(dkz*zt3);
bxyz[4*lj] = zt4;
bxyz[1+4*lj] = zt5;
bxyz[2+4*lj] = zero;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5));
wp += (double) at1;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
exyz[4*(k1+lj)] = zero;
exyz[1+4*(k1+lj)] = zero;
exyz[2+4*(k1+lj)] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
exyz[4*l1] = zero;
exyz[1+4*l1] = zero;
exyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
      exyz[1+4*(k1+l1)] = zero;
      exyz[2+4*(k1+l1)] = zero;
/* sum1 += ws; */
_mm512_store_pd(&dd[0],v_ws);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum1 += (ws + dd[0]);
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
sum3 = 0.0;
sum4 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,dkx,afdt,at1,zt1,zt2,zt3,zt4,zt5,zt6,zt7,zt8, \
zt9,ws,wp,v_it,v_dkx,v_dky,v_dk1,v_dk2,v_afdt,v_at2,v_at3,v_zt1,v_zt2, \
v_zt3,v_zt4,v_zt5,v_zt6,v_zt7,v_d,v_ws,v_wp,dd) \
reduction(+:sum3,sum4)
for (k = 1; k < nyh; k++) {
/* dky = dny*(float) k; */
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
ws = 0.0;
wp = 0.0;
v_ws = _mm512_set1_pd(0.0);
v_wp = _mm512_set1_pd(0.0);
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* afdt = adt*cimagf(ffc[j+kk]); */
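         /* (the loadunpack/permute/shuffle sequence below broadcasts     */
         /* cimagf(ffc[j+kk]) across the lanes of the first field element */
         /* and cimagf(ffc[j+kk+1]) across the lanes of the second) */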
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+kk+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step, ky > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+kj)]) */
/* + crealf(exyz[2+4*(j+kj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+kj)]) */
/* + crealf(exyz[1+4*(j+kj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+kj)]) */
/* + crealf(exyz[4*(j+kj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1); */
/* zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1); */
/* zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)]; */
/* zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)]; */
/* zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) */
/* - afdt*cu[2+4*(j+kj)]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+kj)] = zt7; */
/* exyz[1+4*(j+kj)] = zt8; */
/* exyz[2+4*(j+kj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+kj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+kj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt4 -= dth*(dky*zt1); */
/* zt5 += dth*(dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+kj)] = zt4; */
/* bxyz[1+4*(j+kj)] = zt5; */
/* bxyz[2+4*(j+kj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* update magnetic field half time step, ky < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+k1)]) */
/* + crealf(exyz[2+4*(j+k1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+k1)]) */
/* + crealf(exyz[1+4*(j+k1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+k1)]) */
/* + crealf(exyz[4*(j+k1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336),v_zero,
v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771),v_zero,
v_dk2);
/* zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1); */
/* zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1); */
/* zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)]; */
/* zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)]; */
/* zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) */
/* - afdt*cu[2+4*(j+k1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+k1)] = zt7; */
/* exyz[1+4*(j+k1)] = zt8; */
/* exyz[2+4*(j+k1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+k1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+k1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 += dth*(dky*zt1); */
/* zt5 += dth*(dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+k1)] = zt4; */
/* bxyz[1+4*(j+k1)] = zt5; */
/* bxyz[2+4*(j+k1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+k1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+kj+l1)] = zero; */
/* bxyz[1+4*(j+kj+l1)] = zero; */
/* bxyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero);
/* exyz[4*(j+kj+l1)] = zero; */
/* exyz[1+4*(j+kj+l1)] = zero; */
/* exyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
/* exyz[4*(j+k1+l1)] = zero; */
/* exyz[1+4*(j+k1+l1)] = zero; */
/* exyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j+kk]);
/* update magnetic field half time step, ky > 0 */
zt1 = -cimagf(exyz[2+4*(j+kj)])
+ crealf(exyz[2+4*(j+kj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+kj)])
+ crealf(exyz[1+4*(j+kj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+kj)])
+ crealf(exyz[4*(j+kj)])*_Complex_I;
zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1);
zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1);
zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)];
zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)];
zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3)
- afdt*cu[2+4*(j+kj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+kj)] = zt7;
exyz[1+4*(j+kj)] = zt8;
exyz[2+4*(j+kj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1);
zt5 += dth*(dkx*zt1);
zt6 -= dth*(dkx*zt2 - dky*zt3);
bxyz[4*(j+kj)] = zt4;
bxyz[1+4*(j+kj)] = zt5;
bxyz[2+4*(j+kj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky < 0 */
zt1 = -cimagf(exyz[2+4*(j+k1)])
+ crealf(exyz[2+4*(j+k1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+k1)])
+ crealf(exyz[1+4*(j+k1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+k1)])
+ crealf(exyz[4*(j+k1)])*_Complex_I;
zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1);
zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1);
zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)];
zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)];
zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3)
- afdt*cu[2+4*(j+k1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+k1)] = zt7;
exyz[1+4*(j+k1)] = zt8;
exyz[2+4*(j+k1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dky*zt1);
zt5 += dth*(dkx*zt1);
zt6 -= dth*(dkx*zt2 + dky*zt3);
bxyz[4*(j+k1)] = zt4;
bxyz[1+4*(j+k1)] = zt5;
bxyz[2+4*(j+k1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+kj+l1)] = zero;
bxyz[1+4*(j+kj+l1)] = zero;
bxyz[2+4*(j+kj+l1)] = zero;
exyz[4*(j+kj+l1)] = zero;
exyz[1+4*(j+kj+l1)] = zero;
exyz[2+4*(j+kj+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
exyz[4*(j+k1+l1)] = zero;
exyz[1+4*(j+k1+l1)] = zero;
exyz[2+4*(j+k1+l1)] = zero;
}
/* sum3 += ws; */
_mm512_store_pd(&dd[0],v_ws);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum3 += (ws + dd[0]);
/* sum4 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum4 += (wp + dd[0]);
}
/* mode numbers kx = 0, nx/2 */
ws = 0.0;
wp = 0.0;
v_ws = _mm512_set1_pd(0.0);
   v_wp = _mm512_set1_pd(0.0);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
afdt = adt*cimagf(ffc[kk]);
/* update magnetic field half time step */
zt1 = -cimagf(exyz[2+4*(kj)]) + crealf(exyz[2+4*(kj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(kj)]) + crealf(exyz[4*(kj)])*_Complex_I;
zt4 = bxyz[4*kj] - dth*(dky*zt1);
zt6 = bxyz[2+4*kj] + dth*(dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*kj] + cdt*(dky*zt1) - afdt*cu[4*kj];
zt9 = exyz[2+4*kj] - cdt*(dky*zt3) - afdt*cu[2+4*kj];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*kj] = zt7;
exyz[1+4*kj] = zero;
exyz[2+4*kj] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1);
zt6 += dth*(dky*zt3);
bxyz[4*kj] = zt4;
bxyz[1+4*kj] = zero;
bxyz[2+4*kj] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
exyz[4*k1] = zero;
exyz[1+4*k1] = zero;
exyz[2+4*k1] = zero;
bxyz[4*(kj+l1)] = zero;
bxyz[1+4*(kj+l1)] = zero;
      bxyz[2+4*(kj+l1)] = zero;
exyz[4*(kj+l1)] = zero;
exyz[1+4*(kj+l1)] = zero;
exyz[2+4*(kj+l1)] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx);
/* afdt = adt*cimagf(ffc[j]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step */
/* zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*j]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt5 = bxyz[1+4*j] + dth*(dkx*zt1); */
/* zt6 = bxyz[2+4*j] - dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*j]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; */
/* zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*j]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),v_afdt,
v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*j] = zero; */
/* exyz[1+4*j] = zt8; */
/* exyz[2+4*j] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),v_zero);
_mm512_mask_store_ps((float *)&exyz[4*j],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*j],v_zt4);
}
/* ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt5 += dth*(dkx*zt1); */
/* zt6 -= dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*j] = zero; */
/* bxyz[1+4*j] = zt5; */
/* bxyz[2+4*j] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*j],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*j],v_zt5);
}
/* wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1)] = zero; */
/* bxyz[1+4*(j+k1)] = zero; */
/* bxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero);
/* exyz[4*(j+k1)] = zero; */
/* exyz[1+4*(j+k1)] = zero; */
/* exyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1)],v_zero);
/* bxyz[4*(j+l1)] = zero; */
/* bxyz[1+4*(j+l1)] = zero; */
/* bxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero);
/* exyz[4*(j+l1)] = zero; */
/* exyz[1+4*(j+l1)] = zero; */
/* exyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
/* exyz[4*(j+k1+l1)] = zero; */
/* exyz[1+4*(j+k1+l1)] = zero; */
/* exyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j]);
/* update magnetic field half time step */
zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I;
zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I;
zt5 = bxyz[1+4*j] + dth*(dkx*zt1);
zt6 = bxyz[2+4*j] - dth*(dkx*zt2);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j];
zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
exyz[4*j] = zero;
exyz[1+4*j] = zt8;
exyz[2+4*j] = zt9;
at1 = anorm*(zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt5 += dth*(dkx*zt1);
zt6 -= dth*(dkx*zt2);
bxyz[4*j] = zero;
bxyz[1+4*j] = zt5;
bxyz[2+4*j] = zt6;
at1 = anorm*(zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+k1)] = zero;
bxyz[1+4*(j+k1)] = zero;
bxyz[2+4*(j+k1)] = zero;
exyz[4*(j+k1)] = zero;
exyz[1+4*(j+k1)] = zero;
exyz[2+4*(j+k1)] = zero;
bxyz[4*(j+l1)] = zero;
bxyz[1+4*(j+l1)] = zero;
bxyz[2+4*(j+l1)] = zero;
exyz[4*(j+l1)] = zero;
exyz[1+4*(j+l1)] = zero;
exyz[2+4*(j+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
exyz[4*(j+k1+l1)] = zero;
exyz[1+4*(j+k1+l1)] = zero;
exyz[2+4*(j+k1+l1)] = zero;
}
bxyz[0] = zero;
bxyz[1] = zero;
bxyz[2] = zero;
exyz[0] = zero;
exyz[1] = zero;
   exyz[2] = zero;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
exyz[4*k1] = zero;
exyz[1+4*k1] = zero;
exyz[2+4*k1] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
exyz[4*l1] = zero;
exyz[1+4*l1] = zero;
exyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)] = zero;
/* *wf = ws*((float) nx)*((float) ny)*((float) nz); */
/* sum3 += ws; */
_mm512_store_pd(&dd[0],v_ws);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum3 += (ws + dd[0]);
*wf = (sum1 + sum3)*((float) nx)*((float) ny)*((float) nz);
/* *wm = c2*wp*((float) nx)*((float) ny)*((float) nz); */
/* sum4 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum4 += (wp + dd[0]);
*wm = c2*(sum2 + sum4)*((float) nx)*((float) ny)*((float) nz);
return;
}
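/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library and not called
   anywhere: the horizontal reduction used above for v_ws and v_wp,
   i.e. store a 512-bit double vector to a 64 byte aligned scratch
   array and sum its 8 lanes; the helper name knc_hsum_pd is
   hypothetical */
static double knc_hsum_pd(__m512d v) {
/* local data */
   __attribute__((aligned(64))) double dd[8];
   double s;
   int j;
/* scratch buffer must be 64 byte aligned for _mm512_store_pd */
   _mm512_store_pd(&dd[0],v);
   s = dd[0];
   for (j = 1; j < 8; j++) {
      s += dd[j];
   }
   return s;
}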
/*--------------------------------------------------------------------*/
void ckncmemfield3(float complex fxyz[], float complex exyz[],
float complex ffc[], int isign, int nx, int ny,
int nz, int nxvh, int nyv, int nzv, int nxhd,
int nyhd, int nzhd) {
/* this subroutine either adds complex vector fields if isign > 0
or copies complex vector fields if isign < 0
includes additional smoothing
requires KNC, fxyz, exyz, ffc need to be 64 byte aligned
nxhd needs to be a multiple of 8
nxvh needs to be a multiple of 2
   fxyz, exyz need to have 4 components
local data */
int j, k, l, nxh, nyh, nzh, nxhs, itn, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float at1;
__m512 v_at1, v_zero, v_zt1, v_zt2;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
v_zero = _mm512_setzero_ps();
/* add the fields */
if (isign > 0) {
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,kk,kj,ll,lj,at1,v_at1,v_zt1,v_zt2)
for (l = 1; l < nzh; l++) {
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1; */
/* fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1; */
/* fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+lj)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; */
/* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; */
/* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j+kk+ll]);
fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1;
fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1;
fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1;
fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1;
fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1;
fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1;
fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1;
fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1;
fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1;
fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
}
}
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                  _mm512_int2mask(15),(float *)&ffc[j+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1; */
/* fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1; */
/* fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+lj)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; */
/* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; */
/* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j+ll]);
fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1;
fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1;
fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1;
fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1;
fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1;
fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1;
fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1;
fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1;
fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1;
fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
}
}
}
l1 = nxvyh*nzh;
#pragma omp parallel for private(j,k,k1,kk,kj,at1,v_at1,v_zt1,v_zt2)
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+kk+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1; */
/* fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1; */
/* fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2);
/* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; */
/* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; */
/* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j+kk]);
fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1;
fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1;
fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1;
fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1;
fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1;
fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1;
fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1;
fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1;
fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1;
fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
}
}
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*j] += exyz[4*j]*at1; */
/* fxyz[1+4*j] += exyz[1+4*j]*at1; */
/* fxyz[2+4*j] += exyz[2+4*j]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*j]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*j]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*j],v_zt2);
/* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; */
/* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; */
/* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j]);
fxyz[4*j] += exyz[4*j]*at1;
fxyz[1+4*j] += exyz[1+4*j]*at1;
fxyz[2+4*j] += exyz[2+4*j]*at1;
fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1;
fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1;
fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1;
fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1;
fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1;
fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1;
fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
}
}
/* copy the fields */
else if (isign < 0) {
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,kk,kj,ll,lj,at1,v_at1,v_zt1,v_zt2)
for (l = 1; l < nzh; l++) {
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1; */
/* fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1; */
/* fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; */
/* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; */
/* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j+kk+ll]);
fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1;
fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1;
fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1;
fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1;
fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1;
fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1;
fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1;
fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1;
fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1;
fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
}
}
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                  _mm512_int2mask(15),(float *)&ffc[j+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1; */
/* fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1; */
/* fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; */
/* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; */
/* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j+ll]);
fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1;
fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1;
fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1;
fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1;
fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1;
fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1;
fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1;
fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1;
fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1;
fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
}
}
}
l1 = nxvyh*nzh;
#pragma omp parallel for private(j,k,k1,kk,kj,at1,v_at1,v_zt1,v_zt2)
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+kk+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1; */
/* fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1; */
/* fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2);
/* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; */
/* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; */
/* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j+kk]);
fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1;
fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1;
fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1;
fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1;
fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1;
fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1;
fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1;
fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1;
fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1;
fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
}
}
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*j] = exyz[4*j]*at1; */
/* fxyz[1+4*j] = exyz[1+4*j]*at1; */
/* fxyz[2+4*j] = exyz[2+4*j]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*j]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*j],v_zt2);
/* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; */
/* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; */
/* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = cimagf(ffc[j]);
fxyz[4*j] = exyz[4*j]*at1;
fxyz[1+4*j] = exyz[1+4*j]*at1;
fxyz[2+4*j] = exyz[2+4*j]*at1;
fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1;
fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1;
fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1;
fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1;
fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1;
fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1;
fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
}
}
return;
}
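/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library and not called
   anywhere: the scalar per-element operation performed by the
   vectorized loops in ckncmemfield3, shown for one 4-component field
   element with base index m (e.g. m = 4*(j+kj+lj)) and smoothing
   factor at1 = cimagf(ffc[...]); the helper name memfield3_element is
   hypothetical */
static void memfield3_element(float complex fxyz[],
                              float complex exyz[], float at1, int m,
                              int isign) {
   if (isign > 0) {
/* add the smoothed field */
      fxyz[m] += exyz[m]*at1;
      fxyz[1+m] += exyz[1+m]*at1;
      fxyz[2+m] += exyz[2+m]*at1;
   }
   else if (isign < 0) {
/* copy the smoothed field */
      fxyz[m] = exyz[m]*at1;
      fxyz[1+m] = exyz[1+m]*at1;
      fxyz[2+m] = exyz[2+m]*at1;
   }
   return;
}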
/*--------------------------------------------------------------------*/
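/* illustrative sketch, not part of the original library and not called
   anywhere: the scalar radix-2 butterfly that the vectorized transform
   loops in ckncfft3rmxy below implement with gathers and shuffles;
   t1 is the twiddle factor sct[kmr*j] and k1, k2 index the two
   elements being combined; the helper name fft_butterfly is
   hypothetical */
static void fft_butterfly(float complex f[], float complex t1, int k1,
                          int k2) {
/* local data */
   float complex t2;
   t2 = t1*f[k2];
   f[k2] = f[k1] - t2;
   f[k1] += t2;
   return;
}
/*--------------------------------------------------------------------*/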
void ckncfft3rmxy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in x and y is performed
   f[i][m][n] = (1/(nx*ny*nz))*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)*
exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, a forward fourier transform in x and y is performed
f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
   nxhd needs to be a multiple of 8
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd;
int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
int nss, nxhs, nxhhs, itn;
float ani;
float complex t1, t2, t3;
__m512i v_j, v_kmr, v_m, v_n, v_it;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani;
v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhyd = nxhd*nyd;
nxhs = 8*(nxh/8);
nxhhs = 8*(nxhh/8);
itn = 1 > nxhhs ? 1 : nxhhs;
v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
v_n = _mm512_set_epi32(1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L180;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3, \
v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 8*(ns/8);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nss; j+=8) {
/* t1 = sct[kmr*j]; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* t2 = t1*f[j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = sct[kmr*j];
t2 = t1*f[j+k2+joff];
f[j+k2+joff] = f[j+k1+joff] - t2;
f[j+k1+joff] += t2;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
v_ani = _mm512_set1_ps(ani);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhhs; j+=8) {
/* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
v_zero,v_t3);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* t2 = conjf(f[nxh-j+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[nxh-j+joff-7]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[nxh-j+joff+1]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[j+joff] = ani*(t1 + t2); */
v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/* f[nxh-j+joff] = ani*conjf(t1 - t2); */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[j+joff],
_mm512_int2mask(65532),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
_mm512_int2mask(16383),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
_mm512_int2mask(16383),v_t4);
}
else {
_mm512_store_ps((float *)&f[j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
_mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = ani*(t1 + t2);
f[nxh-j+joff] = ani*conjf(t1 - t2);
}
}
ani = 2.0*ani;
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/* f[i+k1] = f[i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[i+joff]);
_mm512_store_ps((float *)&f[i+k1],v_t2);
/* f[i+joff] = t1; */
_mm512_store_ps((float *)&f[i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+j2] = f[i+j1] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j2],v_t4);
/* f[i+j1] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = f[k1];
f[k1] = 0.5*(cimagf(f[joff] + t1)
+ crealf(f[joff] - t1)*_Complex_I);
f[joff] = 0.5*(crealf(f[joff] + t1)
+ cimagf(f[joff] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L180: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3,v_it, \
v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
f[k1] = conjf(f[joff] - t1);
f[joff] += t1;
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/* f[i+k1] = f[i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[i+joff]);
_mm512_store_ps((float *)&f[i+k1],v_t2);
/* f[i+joff] = t1; */
_mm512_store_ps((float *)&f[i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+j2] = f[i+j1] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j2],v_t4);
/* f[i+j1] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhhs; j+=8) {
/* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* t2 = conjf(f[nxh-j+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[nxh-j+joff-7]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[nxh-j+joff+1]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[j+joff] = t1 + t2; */
v_t3 = _mm512_add_ps(v_t1,v_t2);
/* f[nxh-j+joff] = conjf(t1 - t2); */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[j+joff],
_mm512_int2mask(65532),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
_mm512_int2mask(16383),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
_mm512_int2mask(16383),v_t4);
}
else {
_mm512_store_ps((float *)&f[j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
_mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = t1 + t2;
f[nxh-j+joff] = conjf(t1 - t2);
}
}
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 8*(ns/8);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nss; j+=8) {
/* t1 = conjf(sct[kmr*j]); */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
v_zero,v_t1);
/* t2 = t1*f[j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = conjf(sct[kmr*j]);
t2 = t1*f[j+k2+joff];
f[j+k2+joff] = f[j+k1+joff] - t2;
f[j+k1+joff] += t2;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rmz(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nyi, int nyp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of y,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in z is performed
f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz))
if isign = 1, a forward fourier transform in z is performed
f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd needs to be a multiple of 8
written by viktor k. decyk, ucla
local data */
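/* the z transform below reuses the vector complex-multiply idiom
   described in ckncfft3rmxy; successive z planes are nxhyd
   (= nxhd*nyd) complex elements apart, so the butterflies combine
   elements at ioff + nxhyd*(j+k1) and ioff + nxhyd*(j+k2), while each
   vector iteration covers 8 consecutive x values of a single y row. */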
int indx1, ndx1yz, nx, nxh, ny, nyh;
int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff;
int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1;
int nss, nxhs;
float complex t1, t2;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nzh = nz/2;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nyt = nyi + nyp - 1;
nxhyd = nxhd*nyd;
nxhs = 8*(nxh/8);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L90;
/* inverse fourier transform */
nrzb = nxhyz/nz;
nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[i+i0]);
_mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
_mm512_store_ps((float *)&f[i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+i1];
f[i+i1] = f[i+i0];
f[i+i0] = t1;
}
}
}
/* finally transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+i1];
f[i+i1] = f[i+i0] - t2;
f[i+i0] += t2;
}
}
}
ns = ns2;
}
}
/* unscramble modes kx = 0, nx/2 */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
t1 = f[l1];
f[l1] = 0.5*(cimagf(f[ll] + t1)
+ crealf(f[ll] - t1)*_Complex_I);
f[ll] = 0.5*(crealf(f[ll] + t1)
+ cimagf(f[ll] - t1)*_Complex_I);
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = f[i1];
f[i1] = 0.5*(cimagf(f[i0] + t1)
+ crealf(f[i0] - t1)*_Complex_I);
f[i0] = 0.5*(crealf(f[i0] + t1)
+ cimagf(f[i0] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L90: nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I;
f[l1] = conjf(f[ll] - t1);
f[ll] += t1;
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I;
f[i1] = conjf(f[i0] - t1);
f[i0] += t1;
}
}
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd*n;
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[i+i0]);
_mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
_mm512_store_ps((float *)&f[i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+i1];
f[i+i1] = f[i+i0];
f[i+i0] = t1;
}
}
}
/* first transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+i1];
f[i+i1] = f[i+i0] - t2;
f[i+i0] += t2;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3xy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of 3 three dimensional complex
to real fast fourier transforms and their inverses, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in x and y are
performed
f[i][m][n][0:2] = (1/(nx*ny*nz))*sum(f[i][k][j][0:2]*
exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, three forward fourier transforms in x and y are
performed
f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd needs to be a multiple of 2
f needs to have 4 components
written by viktor k. decyk, ucla
local data */
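/* data layout for the 3-component transform: each grid point owns 4
   consecutive complex slots (the comment above requires f to have 4
   components), the first three holding the field components, so two
   grid points fill one 512-bit register (2*4 complex = 16 floats) and
   the vector loops advance in blocks of 2.  the "swap complex
   components" loops repack the components so that the same vector
   butterflies transform all three components at once; the scalar
   remainder loops perform the same reordering element by element, and
   the permutation pattern v_l applies it to a whole register. */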
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd4, nxhyd;
int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
int nss, nxhs, nxhhs, itn;
float at1, at2, ani;
float complex t1, t2, t3, t4;
__m512i v_j, v_kmr, v_m, v_n, v_l, v_it;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani, v_half;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhd4 = 4*nxhd;
nxhyd = nxhd4*nyd;
nxhs = 2*(nxh/2);
nxhhs = 2*(nxhh/2);
itn = 1 > nxhhs ? 1 : nxhhs;
v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
v_n = _mm512_set_epi32(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
v_half = _mm512_set1_ps(0.5f);
if (isign > 0)
goto L230;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
v_l = _mm512_set_epi32(15,11,14,10,13,9,12,8,7,3,6,2,5,1,4,0);
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
ani,t1,t2,t3,t4,v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(f[2+4*j+joff]); */
/* at2 = crealf(f[2+4*j+joff]); */
/* f[2+4*j+joff] = crealf(f[1+4*j+joff]) */
/* + crealf(f[3+4*j+joff])*_Complex_I; */
/* f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
_mm512_store_ps((float *)&f[4*j+joff],v_t1);
}
/* loop over remaining elements */
for (j = nxhs; j < nxh; j++) {
at1 = cimagf(f[2+4*j+joff]);
at2 = crealf(f[2+4*j+joff]);
f[2+4*j+joff] = crealf(f[1+4*j+joff])
+ crealf(f[3+4*j+joff])*_Complex_I;
f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I;
f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
}
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* t1 = f[4*j1+joff]; */
/* t2 = f[1+4*j1+joff]; */
/* t3 = f[2+4*j1+joff]; */
v_t1 = _mm512_mask_loadunpacklo_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff]);
v_t1 = _mm512_mask_loadunpackhi_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff+8]);
/* f[4*j1+joff] = f[4*j+joff]; */
/* f[1+4*j1+joff] = f[1+4*j+joff]; */
/* f[2+4*j1+joff] = f[2+4*j+joff]; */
v_t2 = _mm512_mask_loadunpacklo_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff]);
v_t2 = _mm512_mask_loadunpackhi_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff+8]);
_mm512_mask_packstorelo_ps((float *)&f[4*j1+joff],
_mm512_int2mask(255),v_t2);
_mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8],
_mm512_int2mask(255),v_t2);
/* f[4*j+joff] = t1; */
/* f[1+4*j+joff] = t2; */
/* f[2+4*j+joff] = t3; */
_mm512_mask_packstorelo_ps((float *)&f[4*j+joff],
_mm512_int2mask(255),v_t1);
_mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8],
_mm512_int2mask(255),v_t1);
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 2*(ns/2);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = 4*ns2*k;
k2 = k1 + 4*ns;
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nss; j+=2) {
/* t1 = sct[kmr*j]; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* t2 = t1*f[4*j+k2+joff]; */
/* t3 = t1*f[1+4*j+k2+joff]; */
/* t4 = t1*f[2+4*j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */
/* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */
/* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k2+joff],v_t4);
/* f[4*j+k1+joff] += t2; */
/* f[1+4*j+k1+joff] += t3; */
/* f[2+4*j+k1+joff] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = sct[kmr*j];
t2 = t1*f[4*j+k2+joff];
t3 = t1*f[1+4*j+k2+joff];
t4 = t1*f[2+4*j+k2+joff];
f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
f[4*j+k1+joff] += t2;
f[1+4*j+k1+joff] += t3;
f[2+4*j+k1+joff] += t4;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
v_ani = _mm512_set1_ps(ani);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhhs; j+=2) {
/* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
v_zero,v_t3);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* for (jj = 0; jj < 3; jj++) { */
/* t2 = conjf(f[jj+4*(nxh-j)+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff+8]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[jj+4*j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[jj+4*j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[jj+4*j+joff] = ani*(t1 + t2); */
v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */
/* } */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[4*j+joff],
_mm512_int2mask(65280),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],
_mm512_int2mask(255),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],
_mm512_int2mask(255),v_t4);
}
else {
_mm512_store_ps((float *)&f[4*j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4);
_mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+4*(nxh-j)+joff]);
t1 = f[jj+4*j+joff] + t2;
t2 = (f[jj+4*j+joff] - t2)*t3;
f[jj+4*j+joff] = ani*(t1 + t2);
f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2);
}
}
}
/* ani = 2.0*ani; */
v_ani = _mm512_add_ps(v_ani,v_ani);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* for (jj = 0; jj < 3; jj++) { */
/* f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[4*nxhh+joff]);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero,
v_t1);
v_t1 = _mm512_mul_ps(v_ani,v_t1);
_mm512_mask_store_ps((float *)&f[4*nxhh+joff],
_mm512_int2mask(63),v_t1);
/* f[jj+joff] = ani*((crealf(f[jj+joff]) */
/* + cimagf(f[jj+joff])) */
/* + (crealf(f[jj+joff]) */
/* - cimagf(f[jj+joff]))*_Complex_I); */
/* } */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2);
v_t3 = _mm512_mul_ps(v_ani,v_t3);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t3);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd4*k1 + nn;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+k1]);
/* f[4*i+k1] = f[4*i+joff]; */
/* f[1+4*i+k1] = f[1+4*i+joff]; */
/* f[2+4*i+k1] = f[2+4*i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+joff]);
_mm512_store_ps((float *)&f[4*i+k1],v_t2);
/* f[4*i+joff] = t1; */
/* f[1+4*i+joff] = t2; */
/* f[2+4*i+joff] = t3; */
_mm512_store_ps((float *)&f[4*i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+k1];
t2 = f[1+4*i+k1];
t3 = f[2+4*i+k1];
f[4*i+k1] = f[4*i+joff];
f[1+4*i+k1] = f[1+4*i+joff];
f[2+4*i+k1] = f[2+4*i+joff];
f[4*i+joff] = t1;
f[1+4*i+joff] = t2;
f[2+4*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd4*(j + k1) + nn;
j2 = nxhd4*(j + k2) + nn;
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
/* t4 = t1*f[2+4*i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
/* f[2+4*i+j2] = f[2+4*i+j1] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j2],v_t4);
/* f[4*i+j1] += t2; */
/* f[1+4*i+j1] += t3; */
/* f[2+4*i+j1] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+j2];
t3 = t1*f[1+4*i+j2];
t4 = t1*f[2+4*i+j2];
f[4*i+j2] = f[4*i+j1] - t2;
f[1+4*i+j2] = f[1+4*i+j1] - t3;
f[2+4*i+j2] = f[2+4*i+j1] - t4;
f[4*i+j1] += t2;
f[1+4*i+j1] += t3;
f[2+4*i+j1] += t4;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd4*k;
k1 = nxhd4*ny - joff + nn;
joff += nn;
/* for (jj = 0; jj < 3; jj++) { */
/* t1 = f[jj+k1]; */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[k1]);
/* f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) */
/* + crealf(f[jj+joff] - t1)*_Complex_I); */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(42),v_t2,v_t1);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21),v_t2,v_t1);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
v_t3 = _mm512_mul_ps(v_half,v_t3);
_mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3);
/* f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) */
/* + cimagf(f[jj+joff] - t1)*_Complex_I); */
/* } */
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t2,v_t1);
v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(21),v_t2,v_t1);
v_t2 = _mm512_mul_ps(v_half,v_t2);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),v_t2);
}
}
return;
/* forward fourier transform */
L230: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
v_l = _mm512_set_epi32(15,13,11,9,14,12,10,8,7,5,3,1,6,4,2,0);
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
t1,t2,t3,t4,v_it,v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd4*k;
k1 = nxhd4*ny - joff + nn;
joff += nn;
/* for (jj = 0; jj < 3; jj++) { */
/* t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[k1]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,177);
/* f[jj+k1] = conjf(f[jj+joff] - t1); */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(63),v_t2,v_t1);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(42),
v_zero,v_t3);
_mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3);
/* f[jj+joff] += t1; */
/* } */
v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(63),v_t2,v_t1);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t2);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd4*k1 + nn;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+k1]);
/* f[4*i+k1] = f[4*i+joff]; */
/* f[1+4*i+k1] = f[1+4*i+joff]; */
/* f[2+4*i+k1] = f[2+4*i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+joff]);
_mm512_store_ps((float *)&f[4*i+k1],v_t2);
/* f[4*i+joff] = t1; */
/* f[1+4*i+joff] = t2; */
/* f[2+4*i+joff] = t3; */
_mm512_store_ps((float *)&f[4*i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+k1];
t2 = f[1+4*i+k1];
t3 = f[2+4*i+k1];
f[4*i+k1] = f[4*i+joff];
f[1+4*i+k1] = f[1+4*i+joff];
f[2+4*i+k1] = f[2+4*i+joff];
f[4*i+joff] = t1;
f[1+4*i+joff] = t2;
f[2+4*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd4*(j + k1) + nn;
j2 = nxhd4*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
/* t4 = t1*f[2+4*i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
/* f[2+4*i+j2] = f[2+4*i+j1] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j2],v_t4);
/* f[4*i+j1] += t2; */
/* f[1+4*i+j1] += t3; */
/* f[2+4*i+j1] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+j2];
t3 = t1*f[1+4*i+j2];
t4 = t1*f[2+4*i+j2];
f[4*i+j2] = f[4*i+j1] - t2;
f[1+4*i+j2] = f[1+4*i+j1] - t3;
f[2+4*i+j2] = f[2+4*i+j1] - t4;
f[4*i+j1] += t2;
f[1+4*i+j1] += t3;
f[2+4*i+j1] += t4;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhhs; j+=2) {
/* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* for (jj = 0; jj < 3; jj++) { */
/* t2 = conjf(f[jj+4*(nxh-j)+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff+8]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[jj+4*j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[jj+4*j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[jj+4*j+joff] = t1 + t2; */
v_t3 = _mm512_add_ps(v_t1,v_t2);
/* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */
/* } */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[4*j+joff],
_mm512_int2mask(65280),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],
_mm512_int2mask(255),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],
_mm512_int2mask(255),v_t4);
}
else {
_mm512_store_ps((float *)&f[4*j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4);
_mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+4*(nxh-j)+joff]);
t1 = f[jj+4*j+joff] + t2;
t2 = (f[jj+4*j+joff] - t2)*t3;
f[jj+4*j+joff] = t1 + t2;
f[jj+4*(nxh-j)+joff] = conjf(t1 - t2);
}
}
}
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* for (jj = 0; jj < 3; jj++) { */
/* f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[4*nxhh+joff]);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero,
v_t1);
v_t1 = _mm512_add_ps(v_t1,v_t1);
_mm512_mask_store_ps((float *)&f[4*nxhh+joff],
_mm512_int2mask(63),v_t1);
/* f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) */
/* + (crealf(f[jj+joff]) */
/* - cimagf(f[jj+joff]))*_Complex_I; */
/* } */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t3);
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* t1 = f[4*j1+joff]; */
/* t2 = f[1+4*j1+joff]; */
/* t3 = f[2+4*j1+joff]; */
v_t1 = _mm512_mask_loadunpacklo_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff]);
v_t1 = _mm512_mask_loadunpackhi_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff+8]);
/* f[4*j1+joff] = f[4*j+joff]; */
/* f[1+4*j1+joff] = f[1+4*j+joff]; */
/* f[2+4*j1+joff] = f[2+4*j+joff]; */
v_t2 = _mm512_mask_loadunpacklo_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff]);
v_t2 = _mm512_mask_loadunpackhi_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff+8]);
_mm512_mask_packstorelo_ps((float *)&f[4*j1+joff],
_mm512_int2mask(255),v_t2);
_mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8],
_mm512_int2mask(255),v_t2);
/* f[4*j+joff] = t1; */
/* f[1+4*j+joff] = t2; */
/* f[2+4*j+joff] = t3; */
_mm512_mask_packstorelo_ps((float *)&f[4*j+joff],
_mm512_int2mask(255),v_t1);
_mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8],
_mm512_int2mask(255),v_t1);
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 2*(ns/2);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = 4*ns2*k;
k2 = k1 + 4*ns;
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nss; j+=2) {
/* t1 = conjf(sct[kmr*j]); */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
v_zero,v_t1);
/* t2 = t1*f[4*j+k2+joff]; */
/* t3 = t1*f[1+4*j+k2+joff]; */
/* t4 = t1*f[2+4*j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */
/* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */
/* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k2+joff],v_t4);
/* f[4*j+k1+joff] += t2; */
/* f[1+4*j+k1+joff] += t3; */
/* f[2+4*j+k1+joff] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = conjf(sct[kmr*j]);
t2 = t1*f[4*j+k2+joff];
t3 = t1*f[1+4*j+k2+joff];
t4 = t1*f[2+4*j+k2+joff];
f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
f[4*j+k1+joff] += t2;
f[1+4*j+k1+joff] += t3;
f[2+4*j+k1+joff] += t4;
}
}
}
ns = ns2;
}
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* f[3+4*j+joff] = cimagf(f[2+4*j+joff]) */
/* + cimagf(f[3+4*j+joff])*_Complex_I; */
/* at1 = crealf(f[2+4*j+joff]); */
/* f[2+4*j+joff] = cimagf(f[4*j+joff]) */
/* + cimagf(f[1+4*j+joff])*_Complex_I; */
/* at2 = crealf(f[1+4*j+joff]); */
/* f[1+4*j+joff] = at1 + 0.0*_Complex_I; */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
_mm512_store_ps((float *)&f[4*j+joff],v_t1);
}
/* loop over remaining elements */
for (j = nxhs; j < nxh; j++) {
f[3+4*j+joff] = cimagf(f[2+4*j+joff])
+ cimagf(f[3+4*j+joff])*_Complex_I;
at1 = crealf(f[2+4*j+joff]);
f[2+4*j+joff] = cimagf(f[4*j+joff])
+ cimagf(f[1+4*j+joff])*_Complex_I;
at2 = crealf(f[1+4*j+joff]);
f[1+4*j+joff] = at1 + 0.0*_Complex_I;
f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
}
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3z(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nyi, int nyp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of 3 three dimensional complex to
real fast fourier transforms and their inverses, for a subset of y,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in z are performed
f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz))
if isign = 1, three forward fourier transforms in z are performed
f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd needs to be a multiple of 2
f needs to have 4 components
written by viktor k. decyk, ucla
local data */
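/* z pass of the 3-component transform: same 4-slot-per-grid-point
   layout as ckncfft3rm3xy, with z planes nxhyd (= 4*nxhd*nyd) complex
   elements apart; each vector iteration moves 2 grid points
   (8 complex values) of one y row through the butterfly. */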
int indx1, ndx1yz, nx, nxh, ny, nyh;
int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd4, nxhyd, ioff;
int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr;
int i0, i1;
int nxhs;
float complex t1, t2, t3, t4;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nzh = nz/2;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nyt = nyi + nyp - 1;
nxhd4 = 4*nxhd;
nxhyd = nxhd4*nyd;
nxhs = 2*(nxh/2);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L110;
/* inverse fourier transform */
nrzb = nxhyz/nz;
nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd4*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+i1]; */
/* t2 = f[1+4*i+i1]; */
/* t3 = f[2+4*i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/* f[4*i+i1] = f[4*i+i0]; */
/* f[1+4*i+i1] = f[1+4*i+i0]; */
/* f[2+4*i+i1] = f[2+4*i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
_mm512_store_ps((float *)&f[4*i+i1],v_t2);
/* f[4*i+i0] = t1; */
/* f[1+4*i+i0] = t2; */
/* f[2+4*i+i0] = t3; */
_mm512_store_ps((float *)&f[4*i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+i1];
t2 = f[1+4*i+i1];
t3 = f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0];
f[1+4*i+i1] = f[1+4*i+i0];
f[2+4*i+i1] = f[2+4*i+i0];
f[4*i+i0] = t1;
f[1+4*i+i0] = t2;
f[2+4*i+i0] = t3;
}
}
}
/* finally transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+i1]; */
/* t3 = t1*f[1+4*i+i1]; */
/* t4 = t1*f[2+4*i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+i1] = f[4*i+i0] - t2; */
/* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i1],v_t4);
/* f[4*i+i0] += t2; */
/* f[1+4*i+i0] += t3; */
/* f[2+4*i+i0] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+i1];
t3 = t1*f[1+4*i+i1];
t4 = t1*f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0] - t2;
f[1+4*i+i1] = f[1+4*i+i0] - t3;
f[2+4*i+i1] = f[2+4*i+i0] - t4;
f[4*i+i0] += t2;
f[1+4*i+i0] += t3;
f[2+4*i+i0] += t4;
}
}
}
ns = ns2;
}
}
/* unscramble modes kx = 0, nx/2 */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
for (jj = 0; jj < 3; jj++) {
t1 = f[jj+l1];
f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1)
+ crealf(f[jj+ll] - t1)*_Complex_I);
f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1)
+ cimagf(f[jj+ll] - t1)*_Complex_I);
}
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd4*nyh;
i0 = i1 + ll;
i1 += l1;
for (jj = 0; jj < 3; jj++) {
t1 = f[jj+i1];
f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1)
+ crealf(f[jj+i0] - t1)*_Complex_I);
f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1)
+ cimagf(f[jj+i0] - t1)*_Complex_I);
}
}
}
return;
/* forward fourier transform */
L110: nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
for (jj = 0; jj < 3; jj++) {
t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I;
f[jj+l1] = conjf(f[jj+ll] - t1);
f[jj+ll] += t1;
}
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd4*nyh;
i0 = i1 + ll;
i1 += l1;
for (jj = 0; jj < 3; jj++) {
t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I;
f[jj+i1] = conjf(f[jj+i0] - t1);
f[jj+i0] += t1;
}
}
}
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd4*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+i1]; */
/* t2 = f[1+4*i+i1]; */
/* t3 = f[2+4*i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/* f[4*i+i1] = f[4*i+i0]; */
/* f[1+4*i+i1] = f[1+4*i+i0]; */
/* f[2+4*i+i1] = f[2+4*i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
_mm512_store_ps((float *)&f[4*i+i1],v_t2);
/* f[4*i+i0] = t1; */
/* f[1+4*i+i0] = t2; */
/* f[2+4*i+i0] = t3; */
_mm512_store_ps((float *)&f[4*i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+i1];
t2 = f[1+4*i+i1];
t3 = f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0];
f[1+4*i+i1] = f[1+4*i+i0];
f[2+4*i+i1] = f[2+4*i+i0];
f[4*i+i0] = t1;
f[1+4*i+i0] = t2;
f[2+4*i+i0] = t3;
}
}
}
/* first transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+i1]; */
/* t3 = t1*f[1+4*i+i1]; */
/* t4 = t1*f[2+4*i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+i1] = f[4*i+i0] - t2; */
/* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i1],v_t4);
/* f[4*i+i0] += t2; */
/* f[1+4*i+i0] += t3; */
/* f[2+4*i+i0] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+i1];
t3 = t1*f[1+4*i+i1];
t4 = t1*f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0] - t2;
f[1+4*i+i1] = f[1+4*i+i0] - t3;
f[2+4*i+i1] = f[2+4*i+i0] - t4;
f[4*i+i0] += t2;
f[1+4*i+i0] += t3;
f[2+4*i+i0] += t4;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for real to complex fft, with packed data */
/* local data */
int ny, nz;
static int nyi = 1, nzi = 1;
/* calculate range of indices */
ny = 1L<<indy;
nz = 1L<<indz;
/* inverse fourier transform */
if (isign < 0) {
/* perform xy fft */
ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform z fft */
ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform z fft */
ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform xy fft */
ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
return;
}
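/* usage sketch (illustrative only, not part of the original code):
   a minimal round trip with ckncwfft3rmx, assuming a wfft3rinit-style
   table-setup routine provided elsewhere in this library has filled
   mixup and sct (table sizes assumed to be nxhyzd and nxyzhd), and
   using _mm_malloc only as one way to obtain the required 64-byte
   alignment.  with indx = indy = indz = 5:
      int indx = 5, indy = 5, indz = 5;
      int nxh = 1<<(indx-1), ny = 1<<indy, nz = 1<<indz;
      int nxhd = nxh, nyd = ny, nzd = nz;
      int nxhyzd = 32, nxyzhd = 16;
      int mixup[32];
      float complex sct[16];
      float complex *f = (float complex *)
         _mm_malloc(sizeof(float complex)*nxhd*nyd*nzd,64);
      fill f with packed real data, call the table setup, then:
      ckncwfft3rmx(f,-1,mixup,sct,indx,indy,indz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
      ckncwfft3rmx(f,1,mixup,sct,indx,indy,indz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
      _mm_free(f);
   isign < 0 selects the inverse (normalized) transform and isign > 0
   the forward transform, as described in the per-routine comments
   above. */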
/*--------------------------------------------------------------------*/
void ckncwfft3rm3(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for 3 3d real to complex ffts, with packed data */
/* local data */
int ny, nz;
static int nyi = 1, nzi = 1;
/* calculate range of indices */
ny = 1L<<indy;
nz = 1L<<indz;
/* inverse fourier transform */
if (isign < 0) {
/* perform xy fft */
ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
nzd,nxhyzd,nxyzhd);
/* perform z fft */
ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform z fft */
ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform xy fft */
ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
nzd,nxhyzd,nxyzhd);
}
return;
}
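/* ckncwfft3rm3 is called exactly like ckncwfft3rmx above, but for a
   3-component (vector field) array: f must supply 4 complex slots per
   grid point, the first three holding the field components, so it
   needs at least 4*nxhd*nyd*nzd complex elements, 64-byte aligned,
   with nxhd a multiple of 2 (see ckncfft3rm3xy above). */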
/* Interfaces to Fortran */
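/* the wrappers below follow the usual fortran calling convention:
   every argument is passed by reference, so scalar inputs are
   dereferenced before calling the corresponding c routine, while
   arrays and output arguments such as ek and irc are passed through
   unchanged; the trailing underscore matches the external names
   generated by most fortran compilers. */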
/*--------------------------------------------------------------------*/
void ckncgbppush3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic,
float *qbm, float *dt, float *dtc, float *ek,
int *idimp, int *nppmx, int *nx, int *ny, int *nz,
int *mx, int *my, int *mz, int *nxv, int *nyv,
int *nzv, int *mx1, int *my1, int *mxyz1,
int *ipbc) {
ckncgbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,
*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,
*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgbppushf3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ek, int *idimp, int *nppmx,
int *nx, int *ny, int *nz, int *mx, int *my,
int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
int *my1, int *mxyz1, int *ntmax, int *irc) {
ckncgbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,
*idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,
*mx1,*my1,*mxyz1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrbppush3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic,
float *qbm, float *dt, float *dtc, float *ci,
float *ek, int *idimp, int *nppmx, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ipbc) {
ckncgrbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,
*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,
*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrbppushf3lt_(float *ppart, float *fxyz, float *bxyz,
int *kpic, int *ncl, int *ihole, float *qbm,
float *dt, float *dtc, float *ci, float *ek,
int *idimp, int *nppmx, int *nx, int *ny,
int *nz, int *mx, int *my, int *mz, int *nxv,
int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ntmax, int *irc) {
ckncgrbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,
*nzv,*mx1,*my1,*mxyz1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgppost3lt_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1) {
ckncgppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
*nzv,*mx1,*my1,*mxyz1);
return;
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1) {
cknc2gppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
*nzv,*mx1,*my1,*mxyz1);
return;
}
/*--------------------------------------------------------------------*/
void ckncgjppost3lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, int *nppmx, int *idimp, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ipbc) {
ckncgjppost3lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx,
*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgjppostf3lt_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, int *nppmx,
int *idimp, int *nx, int *ny, int *nz, int *mx,
int *my, int *mz, int *nxv, int *nyv, int *nzv,
int *mx1, int *my1, int *mxyz1, int *ntmax,
int *irc) {
ckncgjppostf3lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,
*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrjppost3lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, float *ci, int *nppmx, int *idimp,
int *nx, int *ny, int *nz, int *mx, int *my,
int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
int *my1, int *mxyz1, int *ipbc) {
ckncgrjppost3lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz,
*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrjppostf3lt_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, float *ci,
int *nppmx, int *idimp, int *nx, int *ny,
int *nz, int *mx, int *my, int *mz, int *nxv,
int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ntmax, int *irc) {
ckncgrjppostf3lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,
*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,
*mxyz1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cknc2gjppost3lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, int *nppmx, int *idimp, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ipbc) {
cknc2gjppost3lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx,
*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cknc2grjppost3lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, float *ci, int *nppmx, int *idimp,
int *nx, int *ny, int *nz, int *mx, int *my,
int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
int *my1, int *mxyz1, int *ipbc) {
cknc2grjppost3lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz,
*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncpporder3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *mx1, int *my1, int *mz1, int *npbmx,
int *ntmax, int *irc) {
ckncpporder3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz,
*mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncpporderf3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *mx1,
int *my1, int *mz1, int *npbmx, int *ntmax,
int *irc) {
ckncpporderf3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*mz1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncpp2order3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *mx1, int *my1, int *mz1, int *npbmx,
int *ntmax, int *irc) {
ckncpp2order3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,
*nz,*mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cknccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
cknccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void ckncacguard3l_(float *cu, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
ckncacguard3l(cu,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void ckncaguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
ckncaguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void ckncmpois33_(float complex *q, float complex *fxyz, int *isign,
float complex *ffc, float *ax, float *ay, float *az,
float *affp, float *we, int *nx, int *ny, int *nz,
int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd,
int *nzhd) {
ckncmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh,
*nyv,*nzv,*nxhd,*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncmcuperp3_(float complex *cu, int *nx, int *ny, int *nz,
int *nxvh, int *nyv, int *nzv) {
ckncmcuperp3(cu,*nx,*ny,*nz,*nxvh,*nyv,*nzv);
return;
}
/*--------------------------------------------------------------------*/
void ckncmibpois33_(float complex *cu, float complex *bxyz,
float complex *ffc, float *ci, float *wm, int *nx,
int *ny, int *nz, int *nxvh, int *nyv, int *nzv,
int *nxhd, int *nyhd, int *nzhd) {
ckncmibpois33(cu,bxyz,ffc,*ci,wm,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd,
*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncmmaxwel3_(float complex *exyz, float complex *bxyz,
float complex *cu, float complex *ffc, float *ci,
float *dt, float *wf, float *wm, int *nx, int *ny,
int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd,
int *nyhd, int *nzhd) {
ckncmmaxwel3(exyz,bxyz,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nz,*nxvh,*nyv,
*nzv,*nxhd,*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncmemfield3_(float complex *fxyz, float complex *exyz,
float complex *ffc, int *isign, int *nx, int *ny,
int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd,
int *nyhd, int *nzhd) {
ckncmemfield3(fxyz,exyz,ffc,*isign,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd,
*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *indz,
int *nxhd, int *nyd, int *nzd, int *nxhyzd,
int *nxyzhd) {
ckncwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
*nxhyzd,*nxyzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rm3_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *indz,
int *nxhd, int *nyd, int *nzd, int *nxhyzd,
int *nxyzhd) {
ckncwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
*nxhyzd,*nxyzhd);
return;
}
ams.c
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
#include "_hypre_utilities.hpp"
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax
*
* Relaxation on the ParCSR matrix A with right-hand side f and
* initial guess u. Possible values for relax_type are:
*
* 1 = l1-scaled (or weighted) Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
* 3 = Kaczmarz
* 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
* x = BoomerAMG relaxation with relax_type = |x|
* (16 = Cheby)
*
* The default value of relax_type is 2.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRRelax( hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Int relax_type, /* relaxation type */
HYPRE_Int relax_times, /* number of sweeps */
HYPRE_Real *l1_norms, /* l1 norms of the rows of A */
HYPRE_Real relax_weight, /* damping coefficient (usually <= 1) */
HYPRE_Real omega, /* SOR parameter (usually in (0,2)) */
HYPRE_Real max_eig_est, /* for cheby smoothers */
HYPRE_Real min_eig_est,
HYPRE_Int cheby_order,
HYPRE_Real cheby_fraction,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v, /* temporary vector */
hypre_ParVector *z /* temporary vector */ )
{
HYPRE_Int sweep;
for (sweep = 0; sweep < relax_times; sweep++)
{
if (relax_type == 1) /* l1-scaled Jacobi */
{
hypre_BoomerAMGRelax(A, f, NULL, 7, 0, relax_weight, 1.0, l1_norms, u, v, z);
}
else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
{
#if 0
if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
{
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, 1.0, 1.0, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
}
else if (relax_weight == 1.0) /* SSOR */
{
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, 1.0, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
}
else /* scaled SSOR */
{
#endif
/* !!! relax_weight and omega flipped !!! */
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, relax_weight, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
#if 0
}
#endif
}
else if (relax_type == 3) /* Kaczmarz */
{
hypre_BoomerAMGRelax(A, f, NULL, 20, 0, relax_weight, omega, l1_norms, u, v, z);
}
else /* call BoomerAMG relaxation */
{
if (relax_type == 16)
{
hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order, 1,
0, u, v, z);
}
else
{
hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
omega, l1_norms, u, v, z);
}
}
}
return hypre_error_flag;
}
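/*--------------------------------------------------------------------------
 * Illustrative sketch (not part of hypre): one way a caller might drive the
 * relaxation above.  It computes plain row-wise l1 norms (option 1 of
 * hypre_ParCSRComputeL1Norms below) and performs two sweeps of l1-scaled
 * Jacobi (relax_type 1).  The helper name and the parameter choices are
 * hypothetical; the Chebyshev-related arguments are unused for this
 * relax_type and are passed as zeros.
 *--------------------------------------------------------------------------*/
static HYPRE_Int hypre_ParCSRRelaxUsageSketch(hypre_ParCSRMatrix *A,
                                              hypre_ParVector *f,
                                              hypre_ParVector *u,
                                              hypre_ParVector *v,
                                              hypre_ParVector *z)
{
   HYPRE_Real *l1_norms = NULL;
   /* row-wise l1 norms, no CF splitting */
   hypre_ParCSRComputeL1Norms(A, 1, NULL, &l1_norms);
   /* two sweeps of l1-scaled Jacobi; damping 1.0, omega 1.0 */
   hypre_ParCSRRelax(A, f, 1, 2, l1_norms, 1.0, 1.0, 0.0, 0.0, 0, 0.0, u, v, z);
   hypre_TFree(l1_norms, hypre_ParCSRMatrixMemoryLocation(A));
   return hypre_error_flag;
}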
/*--------------------------------------------------------------------------
* hypre_ParVectorInRangeOf
*
* Return a vector that belongs to the range of a given matrix.
*--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
hypre_ParVector *x;
x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(x);
hypre_ParVectorOwnsData(x) = 1;
hypre_ParVectorOwnsPartitioning(x) = 0;
return x;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInDomainOf
*
* Return a vector that belongs to the domain of a given matrix.
*--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
hypre_ParVector *x;
x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixColStarts(A));
hypre_ParVectorInitialize(x);
hypre_ParVectorOwnsData(x) = 1;
hypre_ParVectorOwnsPartitioning(x) = 0;
return x;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockSplit
*
* Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
* block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
hypre_ParVector *x_[3],
HYPRE_Int dim)
{
HYPRE_Int i, d, size_;
HYPRE_Real *x_data, *x_data_[3];
size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
for (d = 0; d < dim; d++)
x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
for (i = 0; i < size_; i++)
for (d = 0; d < dim; d++)
x_data_[d][i] = x_data[dim*i+d];
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockGather
*
* Compose a parallel block vector x from dim given sub-vectors
* x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
hypre_ParVector *x_[3],
HYPRE_Int dim)
{
HYPRE_Int i, d, size_;
HYPRE_Real *x_data, *x_data_[3];
size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
for (d = 0; d < dim; d++)
x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
for (i = 0; i < size_; i++)
for (d = 0; d < dim; d++)
x_data[dim*i+d] = x_data_[d][i];
return hypre_error_flag;
}
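/*--------------------------------------------------------------------------
 * Illustrative sketch (not part of hypre): the interleaved block layout that
 * BlockSplit/BlockGather assume, written out with plain C arrays for dim = 3.
 * The values are hypothetical and only demonstrate the indexing
 * x[dim*i+d] <-> x_[d][i].
 *--------------------------------------------------------------------------*/
static void hypre_BlockLayoutSketch(void)
{
   /* two "nodes", three components per node:
      x = [x0_0, x1_0, x2_0, x0_1, x1_1, x2_1] */
   double x[6] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
   double x_[3][2];
   int i, d, dim = 3, size = 2;
   /* split (as in hypre_ParVectorBlockSplit) */
   for (i = 0; i < size; i++)
      for (d = 0; d < dim; d++)
         x_[d][i] = x[dim*i+d];
   /* now x_[0] = {1,4}, x_[1] = {2,5}, x_[2] = {3,6} */
   /* gather (as in hypre_ParVectorBlockGather) restores the original x */
   for (i = 0; i < size; i++)
      for (d = 0; d < dim; d++)
         x[dim*i+d] = x_[d][i];
}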
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBlockSolve
*
* Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
* Here B is a given BoomerAMG solver for A, while x and b are "block"
* parallel vectors.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
HYPRE_Int d, dim = 1;
hypre_ParVector *b_[3];
hypre_ParVector *x_[3];
dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);
if (dim == 1)
{
hypre_BoomerAMGSolve(B, A, b, x);
return hypre_error_flag;
}
for (d = 0; d < dim; d++)
{
b_[d] = hypre_ParVectorInRangeOf(A);
x_[d] = hypre_ParVectorInRangeOf(A);
}
hypre_ParVectorBlockSplit(b, b_, dim);
hypre_ParVectorBlockSplit(x, x_, dim);
for (d = 0; d < dim; d++)
hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);
hypre_ParVectorBlockGather(x, x_, dim);
for (d = 0; d < dim; d++)
{
hypre_ParVectorDestroy(b_[d]);
hypre_ParVectorDestroy(x_[d]);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFixZeroRows
*
* For every zero row in the matrix: set the diagonal element to 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
HYPRE_Int i, j;
HYPRE_Real l1_norm;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
/* a row will be considered zero if its l1 norm is less than eps */
HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */
for (i = 0; i < num_rows; i++)
{
l1_norm = 0.0;
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
l1_norm += fabs(A_diag_data[j]);
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm += fabs(A_offd_data[j]);
if (l1_norm <= eps)
{
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
if (A_diag_J[j] == i)
A_diag_data[j] = 1.0;
else
A_diag_data[j] = 0.0;
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
A_offd_data[j] = 0.0;
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
__host__ __device__
HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
{
return x <= 4.0/3.0 * y ? y : x;
}
};
#endif
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
HYPRE_Int option,
HYPRE_Int *cf_marker,
HYPRE_Real **l1_norm_ptr)
{
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );
if (exec == HYPRE_EXEC_HOST)
{
HYPRE_Int num_threads = hypre_NumThreads();
if (num_threads > 1)
{
return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
}
}
HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
HYPRE_Real *diag_tmp = NULL;
HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL;
/* collect the cf marker data from other procs */
if (cf_marker != NULL)
{
HYPRE_Int index;
HYPRE_Int num_sends;
HYPRE_Int start;
HYPRE_Int *int_buf_data = NULL;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
if (num_cols_offd)
{
cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data,
memory_location_tmp, cf_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
if (exec == HYPRE_EXEC_DEVICE)
{
cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
}
else
{
cf_marker_dev = cf_marker;
}
}
if (option == 1)
{
/* Set the l1 norm of the diag part */
hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set");
/* Add the l1 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
}
}
else if (option == 2)
{
/* Set the abs(diag) element */
hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
/* Add the l1 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
}
}
else if (option == 3)
{
/* Set the CF l2 norm of the diag part */
hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");
/* Add the CF l2 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
}
}
else if (option == 4)
{
/* Set the abs(diag) element */
hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1);
/* Add the scaled l1 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add");
}
/* Truncate according to Remark 6.2 */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (exec == HYPRE_EXEC_DEVICE)
{
HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() );
}
else
#endif
{
for (i = 0; i < num_rows; i++)
{
if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i])
{
l1_norm[i] = diag_tmp[i];
}
}
}
}
else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
{
/* Set the diag element */
hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if ( exec == HYPRE_EXEC_DEVICE)
{
thrust::identity<HYPRE_Complex> identity;
HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
}
else
#endif
{
for (i = 0; i < num_rows; i++)
{
if (l1_norm[i] == 0.0)
{
l1_norm[i] = 1.0;
}
}
}
*l1_norm_ptr = l1_norm;
return hypre_error_flag;
}
/* Handle negative definite matrices */
if (!diag_tmp)
{
diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
}
/* Set the diag element */
hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (exec == HYPRE_EXEC_DEVICE)
{
HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, thrust::negate<HYPRE_Real>(),
is_negative<HYPRE_Real>() );
//bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0, thrust::minimum<HYPRE_Real>() );
if ( any_zero )
{
hypre_error_in_arg(1);
}
}
else
#endif
{
for (i = 0; i < num_rows; i++)
{
if (diag_tmp[i] < 0.0)
{
l1_norm[i] = -l1_norm[i];
}
}
for (i = 0; i < num_rows; i++)
{
/* if (fabs(l1_norm[i]) < DBL_EPSILON) */
if (fabs(l1_norm[i]) == 0.0)
{
hypre_error_in_arg(1);
break;
}
}
}
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(cf_marker_offd, memory_location_tmp);
hypre_TFree(diag_tmp, memory_location_tmp);
*l1_norm_ptr = l1_norm;
return hypre_error_flag;
}
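/*--------------------------------------------------------------------------
 * Illustrative sketch (not part of hypre): the option 4 truncation rule from
 * the routine above, applied to plain C arrays with hypothetical values.
 * Here l1_norm[i] holds |a_ii| plus half the l1 norm of the off-processor
 * part of row i; whenever that is within a factor of 4/3 of the diagonal,
 * the rule falls back to the diagonal itself.
 *--------------------------------------------------------------------------*/
static void hypre_L1TruncationSketch(void)
{
   double diag[3]    = {4.0, 4.0, 2.0};   /* |a_ii| per row                */
   double l1_norm[3] = {5.5, 4.75, 2.5};  /* |a_ii| + 0.5 * (offd l1 norm) */
   int i;
   for (i = 0; i < 3; i++)
   {
      if (l1_norm[i] <= 4.0/3.0 * diag[i])
      {
         l1_norm[i] = diag[i];
      }
   }
   /* result: {5.5, 4.0, 2.0} -- the last two rows revert to their diagonals */
}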
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDiagRows
*
* For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
for (i = 0; i < num_rows; i++)
{
j = A_diag_I[i];
if ((A_diag_I[i+1] == j+1) && (A_diag_J[j] == i) &&
(!num_cols_offd || (A_offd_I[i+1] == A_offd_I[i])))
{
A_diag_data[j] = d;
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSCreate
*
* Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
hypre_AMSData *ams_data;
ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);
/* Default parameters */
ams_data -> dim = 3; /* 3D problem */
ams_data -> maxit = 20; /* perform at most 20 iterations */
ams_data -> tol = 1e-6; /* convergence tolerance */
ams_data -> print_level = 1; /* print residual norm at each step */
ams_data -> cycle_type = 1; /* a 3-level multiplicative solver */
ams_data -> A_relax_type = 2; /* offd-l1-scaled GS */
ams_data -> A_relax_times = 1; /* one relaxation sweep */
ams_data -> A_relax_weight = 1.0; /* damping parameter */
ams_data -> A_omega = 1.0; /* SSOR coefficient */
ams_data -> A_cheby_order = 2; /* Cheby: order (1-4 are valid) */
ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */
ams_data -> B_G_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_G_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_G_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_G_theta = 0.25; /* strength threshold */
ams_data -> B_G_interp_type = 0; /* interpolation type */
ams_data -> B_G_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_Pi_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_Pi_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_Pi_theta = 0.25; /* strength threshold */
ams_data -> B_Pi_interp_type = 0; /* interpolation type */
ams_data -> B_Pi_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> beta_is_zero = 0; /* the problem has a mass term */
/* By default, do l1-GS smoothing on the coarsest grid */
ams_data -> B_G_coarse_relax_type = 8;
ams_data -> B_Pi_coarse_relax_type = 8;
/* The rest of the fields are initialized using the Set functions */
ams_data -> A = NULL;
ams_data -> G = NULL;
ams_data -> A_G = NULL;
ams_data -> B_G = 0;
ams_data -> Pi = NULL;
ams_data -> A_Pi = NULL;
ams_data -> B_Pi = 0;
ams_data -> x = NULL;
ams_data -> y = NULL;
ams_data -> z = NULL;
ams_data -> Gx = NULL;
ams_data -> Gy = NULL;
ams_data -> Gz = NULL;
ams_data -> r0 = NULL;
ams_data -> g0 = NULL;
ams_data -> r1 = NULL;
ams_data -> g1 = NULL;
ams_data -> r2 = NULL;
ams_data -> g2 = NULL;
ams_data -> Pix = NULL;
ams_data -> Piy = NULL;
ams_data -> Piz = NULL;
ams_data -> A_Pix = NULL;
ams_data -> A_Piy = NULL;
ams_data -> A_Piz = NULL;
ams_data -> B_Pix = 0;
ams_data -> B_Piy = 0;
ams_data -> B_Piz = 0;
ams_data -> interior_nodes = NULL;
ams_data -> G0 = NULL;
ams_data -> A_G0 = NULL;
ams_data -> B_G0 = 0;
ams_data -> projection_frequency = 5;
ams_data -> A_l1_norms = NULL;
ams_data -> A_max_eig_est = 0;
ams_data -> A_min_eig_est = 0;
ams_data -> owns_Pi = 1;
ams_data -> owns_A_G = 0;
ams_data -> owns_A_Pi = 0;
return (void *) ams_data;
}
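/*--------------------------------------------------------------------------
 * Illustrative usage sketch (not part of hypre): how a caller typically
 * drives the AMS solver through the public HYPRE_AMS* interface once the
 * edge matrix A, the discrete gradient G and the vertex coordinates are
 * available.  The helper name is hypothetical, and the objects passed in
 * (A, G, xcoord, ycoord, zcoord, b, x) are assumed to exist already; the
 * parameter values simply mirror the defaults set in hypre_AMSCreate above.
 *--------------------------------------------------------------------------*/
static HYPRE_Int hypre_AMSUsageSketch(HYPRE_ParCSRMatrix A,
                                      HYPRE_ParCSRMatrix G,
                                      HYPRE_ParVector    xcoord,
                                      HYPRE_ParVector    ycoord,
                                      HYPRE_ParVector    zcoord,
                                      HYPRE_ParVector    b,
                                      HYPRE_ParVector    x)
{
   HYPRE_Solver solver;
   HYPRE_AMSCreate(&solver);
   HYPRE_AMSSetDimension(solver, 3);
   HYPRE_AMSSetDiscreteGradient(solver, G);
   HYPRE_AMSSetCoordinateVectors(solver, xcoord, ycoord, zcoord);
   HYPRE_AMSSetMaxIter(solver, 20);
   HYPRE_AMSSetTol(solver, 1e-6);
   HYPRE_AMSSetCycleType(solver, 1);
   HYPRE_AMSSetPrintLevel(solver, 1);
   HYPRE_AMSSetup(solver, A, b, x);
   HYPRE_AMSSolve(solver, A, b, x);
   HYPRE_AMSDestroy(solver);
   return hypre_error_flag;
}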
/*--------------------------------------------------------------------------
* hypre_AMSDestroy
*
* Deallocate the AMS solver structure. Note that the input data (given
* through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (!ams_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (ams_data -> owns_A_G)
if (ams_data -> A_G)
hypre_ParCSRMatrixDestroy(ams_data -> A_G);
if (!ams_data -> beta_is_zero)
if (ams_data -> B_G)
HYPRE_BoomerAMGDestroy(ams_data -> B_G);
if (ams_data -> owns_Pi && ams_data -> Pi)
hypre_ParCSRMatrixDestroy(ams_data -> Pi);
if (ams_data -> owns_A_Pi)
if (ams_data -> A_Pi)
hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
if (ams_data -> B_Pi)
HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);
if (ams_data -> owns_Pi && ams_data -> Pix)
hypre_ParCSRMatrixDestroy(ams_data -> Pix);
if (ams_data -> A_Pix)
hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
if (ams_data -> B_Pix)
HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
if (ams_data -> owns_Pi && ams_data -> Piy)
hypre_ParCSRMatrixDestroy(ams_data -> Piy);
if (ams_data -> A_Piy)
hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
if (ams_data -> B_Piy)
HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
if (ams_data -> owns_Pi && ams_data -> Piz)
hypre_ParCSRMatrixDestroy(ams_data -> Piz);
if (ams_data -> A_Piz)
hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
if (ams_data -> B_Piz)
HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);
if (ams_data -> r0)
hypre_ParVectorDestroy(ams_data -> r0);
if (ams_data -> g0)
hypre_ParVectorDestroy(ams_data -> g0);
if (ams_data -> r1)
hypre_ParVectorDestroy(ams_data -> r1);
if (ams_data -> g1)
hypre_ParVectorDestroy(ams_data -> g1);
if (ams_data -> r2)
hypre_ParVectorDestroy(ams_data -> r2);
if (ams_data -> g2)
hypre_ParVectorDestroy(ams_data -> g2);
if (ams_data -> G0)
hypre_ParCSRMatrixDestroy(ams_data -> A);
if (ams_data -> G0)
hypre_ParCSRMatrixDestroy(ams_data -> G0);
if (ams_data -> A_G0)
hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
if (ams_data -> B_G0)
HYPRE_BoomerAMGDestroy(ams_data -> B_G0);
hypre_SeqVectorDestroy(ams_data -> A_l1_norms);
/* G, x, y ,z, Gx, Gy and Gz are not destroyed */
if (ams_data)
{
hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDimension
*
* Set problem dimension (2 or 3). By default we assume dim = 3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDimension(void *solver,
HYPRE_Int dim)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (dim != 2 && dim != 3)
hypre_error_in_arg(2);
ams_data -> dim = dim;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDiscreteGradient
*
* Set the discrete gradient matrix G.
* This function should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
hypre_ParCSRMatrix *G)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> G = G;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCoordinateVectors
*
* Set the x, y and z coordinates of the vertices in the mesh.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
hypre_ParVector *x,
hypre_ParVector *y,
hypre_ParVector *z)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> x = x;
ams_data -> y = y;
ams_data -> z = z;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetEdgeConstantVectors
*
* Set the vectors Gx, Gy and Gz which give the representations of
* the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
* edge element basis.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> Gx = Gx;
ams_data -> Gy = Gy;
ams_data -> Gz = Gz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInterpolations
*
* Set (the components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
*
* This function is generally intended to be used only for high-order Nedelec
* discretizations (in the lowest order case, Pi is constructed internally in
* AMS from the discrete gradient matrix and the coordinates of the vertices),
* though it can also be used in the lowest-order case or for other types of
* discretizations (e.g. ones based on the second family of Nedelec elements).
*
* By definition, Pi is the matrix representation of the linear operator that
* interpolates (high-order) vector nodal finite elements into the (high-order)
* Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
* and similarly for Piy and Piz. Note that all these operators depend on the
* choice of the basis and degrees of freedom in the high-order spaces.
*
* The column numbering of Pi should be node-based, i.e. the x/y/z components of
* the first node (vertex or high-order dof) should be listed first, followed by
* the x/y/z components of the second node and so on (see the documentation of
* HYPRE_BoomerAMGSetDofFunc).
*
* If used, this function should be called before hypre_AMSSetup() and there is
* no need to provide the vertex coordinates. Furthermore, only one of the sets
* {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
* both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
* cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
* monolithic Pi (cycle_type < 10) require that Pi is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
hypre_ParCSRMatrix *Pi,
hypre_ParCSRMatrix *Pix,
hypre_ParCSRMatrix *Piy,
hypre_ParCSRMatrix *Piz)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> Pi = Pi;
ams_data -> Pix = Pix;
ams_data -> Piy = Piy;
ams_data -> Piz = Piz;
ams_data -> owns_Pi = 0;
return hypre_error_flag;
}
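/*--------------------------------------------------------------------------
 * Illustrative note (not from the hypre documentation): with the node-based
 * column numbering described above and dim = 3, the first six columns of Pi
 * correspond to
 *
 *    [ x-comp of node 0, y-comp of node 0, z-comp of node 0,
 *      x-comp of node 1, y-comp of node 1, z-comp of node 1, ... ]
 *
 * i.e. column dim*v + d holds component d of node v.
 *--------------------------------------------------------------------------*/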
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* alpha (the curl-curl term coefficient in the Maxwell problem).
*
* If this function is called, the coarse space solver on the range
* of Pi^T is a block-diagonal version of A_Pi. If this function is not
* called, the coarse space solver on the range of Pi^T is constructed
* as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
hypre_ParCSRMatrix *A_Pi)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_Pi = A_Pi;
/* Penalize the eliminated degrees of freedom */
hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* beta (the mass term coefficient in the Maxwell problem).
*
* This function call is optional - if not given, the Poisson matrix will
* be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
* that beta is 0 and use two-level (instead of three-level) methods.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
hypre_ParCSRMatrix *A_G)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_G = A_G;
if (!A_G)
ams_data -> beta_is_zero = 1;
else
{
/* Penalize the eliminated degrees of freedom */
hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInteriorNodes
*
* Set the list of nodes which are interior to the zero-conductivity region.
* A node is interior if interior_nodes[i] == 1.0.
*
* Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
hypre_ParVector *interior_nodes)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> interior_nodes = interior_nodes;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetProjectionFrequency
*
* How often to project the r.h.s. onto the compatible sub-space Ker(G0^T),
* when iterating with the solver.
*
* The default value is every 5th iteration.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
HYPRE_Int projection_frequency)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> projection_frequency = projection_frequency;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetMaxIter
*
* Set the maximum number of iterations in the three-level method.
* The default value is 20. To use the AMS solver as a preconditioner,
* set maxit to 1, tol to 0.0 and print_level to 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
HYPRE_Int maxit)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> maxit = maxit;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetTol
*
* Set the convergence tolerance (if the method is used as a solver).
* The default value is 1e-6.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetTol(void *solver,
HYPRE_Real tol)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> tol = tol;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCycleType
*
* Choose which three-level solver to use. Possible values are:
*
* 1 = 3-level multipl. solver (01210) <-- small solution time
* 2 = 3-level additive solver (0+1+2)
* 3 = 3-level multipl. solver (02120)
* 4 = 3-level additive solver (010+2)
* 5 = 3-level multipl. solver (0102010) <-- small solution time
* 6 = 3-level additive solver (1+020)
* 7 = 3-level multipl. solver (0201020) <-- small number of iterations
* 8 = 3-level additive solver (0(1+2)0) <-- small solution time
* 9 = 3-level multipl. solver (01210) with discrete divergence
* 11 = 5-level multipl. solver (013454310) <-- small solution time, memory
* 12 = 5-level additive solver (0+1+3+4+5)
* 13 = 5-level multipl. solver (034515430) <-- small solution time, memory
* 14 = 5-level additive solver (01(3+4+5)10)
* 20 = 2-level multipl. solver (0[12]0)
*
* 0 = a Hiptmair-like smoother (010)
*
* The default value is 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCycleType(void *solver,
HYPRE_Int cycle_type)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> cycle_type = cycle_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetPrintLevel
*
* Control how much information is printed during the solution iterations.
* The default value is 1 (print residual norm at each step).
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
HYPRE_Int print_level)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> print_level = print_level;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetSmoothingOptions
*
* Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
HYPRE_Int A_relax_type,
HYPRE_Int A_relax_times,
HYPRE_Real A_relax_weight,
HYPRE_Real A_omega)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_relax_type = A_relax_type;
ams_data -> A_relax_times = A_relax_times;
ams_data -> A_relax_weight = A_relax_weight;
ams_data -> A_omega = A_omega;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetChebySmoothingOptions
* AB: note: this could be added to the above,
* but I didn't want to change the parameter list.
* Set parameters for the Chebyshev smoother for A. Default values: 2, 0.3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
HYPRE_Int A_cheby_order,
HYPRE_Int A_cheby_fraction)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_cheby_order = A_cheby_order;
ams_data -> A_cheby_fraction = A_cheby_fraction;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGOptions
*
* Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
HYPRE_Int B_Pi_coarsen_type,
HYPRE_Int B_Pi_agg_levels,
HYPRE_Int B_Pi_relax_type,
HYPRE_Real B_Pi_theta,
HYPRE_Int B_Pi_interp_type,
HYPRE_Int B_Pi_Pmax)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> B_Pi_coarsen_type = B_Pi_coarsen_type;
ams_data -> B_Pi_agg_levels = B_Pi_agg_levels;
ams_data -> B_Pi_relax_type = B_Pi_relax_type;
ams_data -> B_Pi_theta = B_Pi_theta;
ams_data -> B_Pi_interp_type = B_Pi_interp_type;
ams_data -> B_Pi_Pmax = B_Pi_Pmax;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
HYPRE_Int B_Pi_coarse_relax_type)
{
hypre_AMSData *ams_data = (hypre_AMSData *)solver;
ams_data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGOptions
*
* Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
HYPRE_Int B_G_coarsen_type,
HYPRE_Int B_G_agg_levels,
HYPRE_Int B_G_relax_type,
HYPRE_Real B_G_theta,
HYPRE_Int B_G_interp_type,
HYPRE_Int B_G_Pmax)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> B_G_coarsen_type = B_G_coarsen_type;
ams_data -> B_G_agg_levels = B_G_agg_levels;
ams_data -> B_G_relax_type = B_G_relax_type;
ams_data -> B_G_theta = B_G_theta;
ams_data -> B_G_interp_type = B_G_interp_type;
ams_data -> B_G_Pmax = B_G_Pmax;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_G. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
HYPRE_Int B_G_coarse_relax_type)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> B_G_coarse_relax_type = B_G_coarse_relax_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePi
*
* Construct the Pi interpolation matrix, which maps the space of vector
* linear finite elements to the space of edge finite elements.
*
* The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
* where each block has the same sparsity structure as G, and the entries
* can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pi_ptr)
{
hypre_ParCSRMatrix *Pi;
/* Compute Pi = [Pi_x, Pi_y, Pi_z] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts;
HYPRE_Int col_starts_size;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
col_starts_size = 2;
col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];
Pi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
hypre_ParCSRMatrixInitialize(Pi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);
for (i = 0; i < G_diag_nrows+1; i++)
Pi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
Pi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
}
}
*Pi_ptr = Pi;
return hypre_error_flag;
}
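/*--------------------------------------------------------------------------
 * Illustrative note (not from the hypre documentation): written entrywise,
 * the loops above assign, for every nonzero G(e,v) of the discrete gradient
 * and every component d in {x,y,z},
 *
 *    Pi(e, dim*v + d) = 0.5 * |G(e,v)| * Gd_data[e],
 *
 * where e is the edge (row) index, v the vertex (column) index, and Gd is
 * the edge representation of the constant field in direction d (Gx, Gy, Gz).
 *--------------------------------------------------------------------------*/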
/*--------------------------------------------------------------------------
* hypre_AMSComputePixyz
*
* Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
* which maps the space of vector linear finite elements to the space of
* edge finite elements.
*
* The construction is based on the fact that each component has the same
* sparsity structure as G, and the entries can be computed from the vectors
* Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pix_ptr,
hypre_ParCSRMatrix **Piy_ptr,
hypre_ParCSRMatrix **Piz_ptr)
{
hypre_ParCSRMatrix *Pix, *Piy, *Piz;
/* Compute Pix, Piy, Piz */
{
HYPRE_Int i, j;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
Pix = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
hypre_ParCSRMatrixInitialize(Pix);
Piy = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piy) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
hypre_ParCSRMatrixInitialize(Piy);
if (dim == 3)
{
Piz = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piz) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
hypre_ParCSRMatrixInitialize(Piz);
}
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
if (dim == 3)
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
Piz_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
Piz_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
*Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
else
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
}
}
/* Fill-in the off-diagonal part */
if (dim == 3)
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
Piz_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
Piz_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
*Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
Piz_cmap[i] = G_cmap[i];
}
}
else
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
}
}
}
*Pix_ptr = Pix;
*Piy_ptr = Piy;
if (dim == 3)
*Piz_ptr = Piz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputeGPi
*
* Construct the matrix [G,Pi] which can be considered an interpolation
* matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite element space.
*--------------------------------------------------------------------------*/
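/* For reference (matching the fill loops below): in 3D each nonzero G(e,v)
   expands to the dim+1 = 4 consecutive entries
   [ G(e,v), 0.5*|G(e,v)|*Gx(e), 0.5*|G(e,v)|*Gy(e), 0.5*|G(e,v)|*Gz(e) ]
   in row e of GPi, so GPi has (dim+1) times the nonzeros of G. */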
HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **GPi_ptr)
{
hypre_ParCSRMatrix *GPi;
/* Take into account G */
dim++;
/* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts;
HYPRE_Int col_starts_size;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
col_starts_size = 2;
col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];
GPi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(GPi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
hypre_ParCSRMatrixInitialize(GPi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 4)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);
for (i = 0; i < G_diag_nrows+1; i++)
GPi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*GPi_diag_data++ = G_diag_data[j];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
GPi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*GPi_offd_data++ = G_offd_data[j];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
}
}
*GPi_ptr = GPi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
 * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors()
*--------------------------------------------------------------------------*/
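/* A minimal call sequence (illustrative sketch only; assumes the edge matrix A,
   the discrete gradient G and the vertex coordinate vectors x, y, z have
   already been assembled, and that b, x0 are compatible edge-space vectors):

      void *ams = hypre_AMSCreate();
      hypre_AMSSetDiscreteGradient(ams, G);
      hypre_AMSSetCoordinateVectors(ams, x, y, z);
      hypre_AMSSetup(ams, A, b, x0);
      hypre_AMSSolve(ams, A, b, x0);
      hypre_AMSDestroy(ams);
*/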
HYPRE_Int hypre_AMSSetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int input_info = 0;
ams_data -> A = A;
/* Modifications for problems with zero-conductivity regions */
if (ams_data -> interior_nodes)
{
hypre_ParCSRMatrix *G0t, *Aorig = A;
/* Make sure that multiple Setup()+Solve() give identical results */
ams_data -> solve_counter = 0;
/* Construct the discrete gradient matrix for the zero-conductivity region
by eliminating the zero-conductivity nodes from G^t. The range of G0
represents the kernel of A, i.e. the gradients of nodal basis functions
supported in zero-conductivity regions. */
hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
{
HYPRE_Int i, j;
HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
HYPRE_Real *interior_nodes_data=hypre_VectorData(
hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
for (i = 0; i < nv; i++)
{
if (interior_nodes_data[i] != 1)
{
for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
G0tdA[j] = 0.0;
if (G0toI)
for (j = G0toI[i]; j < G0toI[i+1]; j++)
G0toA[j] = 0.0;
}
}
}
hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);
/* Construct the subspace matrix A_G0 = G0^T G0 */
ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
/* Create AMG solver for A_G0 */
HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
HYPRE_BoomerAMGSetup(ams_data -> B_G0,
(HYPRE_ParCSRMatrix)ams_data -> A_G0,
0, 0);
/* Construct the preconditioner for ams_data->A = A + G0 G0^T.
NOTE: this can be optimized significantly by taking into account that
the sparsity pattern of A is a subset of the sparsity pattern of G0 G0^T */
{
hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
hypre_ParCSRMatrix *B = Aorig;
hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
hypre_ParCSRMatrix *C;
HYPRE_Real factor, lfactor;
/* scale (penalize) G0 G0^T before adding it to the matrix */
{
HYPRE_Int i;
HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B));
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B));
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B));
lfactor = -1;
for (i = 0; i < B_diag_i[B_num_rows]; i++)
if (fabs(B_diag_data[i]) > lfactor)
lfactor = fabs(B_diag_data[i]);
for (i = 0; i < B_offd_i[B_num_rows]; i++)
if (fabs(B_offd_data[i]) > lfactor)
lfactor = fabs(B_offd_data[i]);
lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
}
hypre_ParcsrAdd(factor, A, 1.0, B, &C);
/*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);*/
/* scale (penalize) G0 G0^T before adding it to the matrix */
/*{
HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
HYPRE_Real *data = hypre_CSRMatrixData(A_local);
HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
HYPRE_Real factor, lfactor;
lfactor = -1;
for (i = 0; i < nnzB; i++)
if (fabs(dataB[i]) > lfactor)
lfactor = fabs(dataB[i]);
lfactor *= 1e-10;
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
for (i = 0; i < nnz; i++)
data[i] *= factor;
}
C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local);
C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0);
if (C_local)
hypre_CSRMatrixDestroy(C_tmp);
else
C_local = C_tmp;
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 1;
hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*/
hypre_ParCSRMatrixDestroy(A);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(G0t);
}
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
/* Compute the l1 norm of the rows of A */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
{
HYPRE_Real *l1_norm_data = NULL;
hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data);
ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A));
hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data;
hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A));
}
/* Chebyshev? */
if (ams_data -> A_relax_type == 16)
{
hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
&ams_data->A_max_eig_est,
&ams_data->A_min_eig_est);
}
/* If not given, compute Gx, Gy and Gz */
{
if (ams_data -> x != NULL && ams_data -> y != NULL &&
(ams_data -> dim == 2 || ams_data -> z != NULL))
input_info = 1;
if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
(ams_data -> dim == 2 || ams_data -> Gz != NULL))
input_info = 2;
if (input_info == 1)
{
ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
if (ams_data -> dim == 3)
{
ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
}
}
}
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
if (ams_data -> cycle_type == 20)
/* Construct the combined interpolation matrix [G,Pi] */
hypre_AMSComputeGPi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
else if (ams_data -> cycle_type > 10)
/* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
hypre_AMSComputePixyz(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pix,
&ams_data -> Piy,
&ams_data -> Piz);
else
/* Construct the Pi interpolation matrix */
hypre_AMSComputePi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
}
/* Keep Gx, Gy and Gz only if we use the method with discrete divergence
stabilization (where we use them to compute the local mesh size). */
if (input_info == 1 && ams_data -> cycle_type != 9)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
/* Create the AMG solver on the range of G^T */
if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
{
HYPRE_BoomerAMGCreate(&ams_data -> B_G);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
/* If not given, construct the coarse space matrix by RAP */
if (!ams_data -> A_G)
{
HYPRE_Int G_owned_col_starts;
if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
hypre_MatvecCommPkgCreate(ams_data -> G);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
ams_data -> A,
ams_data -> G,
&ams_data -> A_G);
/* Make sure that A_G has no zero rows (this can happen
if beta is zero in part of the domain). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;
ams_data -> owns_A_G = 1;
}
HYPRE_BoomerAMGSetup(ams_data -> B_G,
(HYPRE_ParCSRMatrix)ams_data -> A_G,
0, 0);
}
if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
/* Create the AMG solvers on the range of Pi{x,y,z}^T */
{
HYPRE_Int P_owned_col_starts;
HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
/* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
{
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
}
/* Construct the coarse space matrices by RAP */
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
hypre_MatvecCommPkgCreate(ams_data -> Pix);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
ams_data -> A,
ams_data -> Pix,
&ams_data -> A_Pix);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
}
/* Make sure that A_Pix has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
(HYPRE_ParCSRMatrix)ams_data -> A_Pix,
0, 0);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
hypre_MatvecCommPkgCreate(ams_data -> Piy);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
ams_data -> A,
ams_data -> Piy,
&ams_data -> A_Piy);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
}
/* Make sure that A_Piy has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
(HYPRE_ParCSRMatrix)ams_data -> A_Piy,
0, 0);
if (ams_data -> Piz)
{
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
hypre_MatvecCommPkgCreate(ams_data -> Piz);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
ams_data -> A,
ams_data -> Piz,
&ams_data -> A_Piz);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
}
/* Make sure that A_Piz has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
(HYPRE_ParCSRMatrix)ams_data -> A_Piz,
0, 0);
}
}
else
/* Create the AMG solver on the range of Pi^T */
{
HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
/* If not given, construct the coarse space matrix by RAP and
notify BoomerAMG that this is a dim x dim block system. */
if (!ams_data -> A_Pi)
{
HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
hypre_MatvecCommPkgCreate(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
if (ams_data -> cycle_type == 9)
{
/* Add a discrete divergence term to A before computing Pi^t A Pi */
{
hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;
/* scale GGt by h^2 */
{
HYPRE_Real h2;
HYPRE_Int i, j, k, ne;
hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
for (i = 0; i < Gt_num_rows; i++)
{
/* determine the characteristic mesh size for vertex i */
h2 = 0.0;
ne = 0;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
{
k = Gt_diag_J[j];
h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
ne++;
}
if (ne != 0)
{
h2 /= ne;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
Gt_diag_data[j] *= h2;
for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
Gt_offd_data[j] *= h2;
}
}
}
/* we only needed Gx, Gy and Gz to compute the local mesh size */
if (input_info == 1)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
GGt = hypre_ParMatmul(ams_data -> G, Gt);
hypre_ParCSRMatrixDestroy(Gt);
/* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
hypre_ParcsrAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt);
/*{
hypre_ParCSRMatrix *A = GGt;
hypre_ParCSRMatrix *B = ams_data -> A;
hypre_ParCSRMatrix **C_ptr = &ApGGt;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
C_local = hypre_CSRMatrixBigAdd(A_local, B_local);
hypre_CSRMatrixBigJtoJ(C_local);
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*C_ptr = C;
}*/
hypre_ParCSRMatrixDestroy(GGt);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ApGGt,
ams_data -> Pi,
&ams_data -> A_Pi);
}
}
else
{
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ams_data -> A,
ams_data -> Pi,
&ams_data -> A_Pi);
}
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
}
ams_data -> owns_A_Pi = 1;
if (ams_data -> cycle_type != 20)
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
else
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
/* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
}
/* Make sure that A_Pi has no zero rows (this can happen for
some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
(HYPRE_ParCSRMatrix)ams_data -> A_Pi,
0, 0);
}
/* Allocate temporary vectors */
ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
if (ams_data -> A_G)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
}
if (ams_data -> r1 == NULL && ams_data -> A_Pix)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
}
if (ams_data -> Pi)
{
ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int i, my_id = -1;
HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
char cycle[30];
hypre_ParCSRMatrix *Ai[5], *Pi[5];
HYPRE_Solver Bi[5];
HYPRE_PtrToSolverFcn HBi[5];
hypre_ParVector *ri[5], *gi[5];
hypre_ParVector *z = NULL;
Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G;
Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi;
Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;
Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;
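/* In the cycle strings constructed below, '0' denotes smoothing on A, while
   the digits '1'..'5' select the subspace corrections Ai[0]..Ai[4] set up
   above (A_G, A_Pi, A_Pix, A_Piy, A_Piz); see hypre_ParCSRSubspacePrec. */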
/* may need to create an additional temporary vector for relaxation */
if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
{
z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(z);
hypre_ParVectorSetPartitioningOwner(z,0);
}
if (ams_data -> print_level > 0)
hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
/* Compatible subspace projection for problems with zero-conductivity regions.
Note that this modifies the input (r.h.s.) vector b! */
if ( (ams_data -> B_G0) &&
(++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
{
/* hypre_printf("Projecting onto the compatible subspace...\n"); */
hypre_AMSProjectOutGradients(ams_data, b);
}
if (ams_data -> beta_is_zero)
{
switch (ams_data -> cycle_type)
{
case 0:
hypre_sprintf(cycle,"%s","0");
break;
case 1:
case 3:
case 5:
case 7:
default:
hypre_sprintf(cycle,"%s","020");
break;
case 2:
case 4:
case 6:
case 8:
hypre_sprintf(cycle,"%s","(0+2)");
break;
case 11:
case 13:
hypre_sprintf(cycle,"%s","0345430");
break;
case 12:
hypre_sprintf(cycle,"%s","(0+3+4+5)");
break;
case 14:
hypre_sprintf(cycle,"%s","0(+3+4+5)0");
break;
}
}
else
{
switch (ams_data -> cycle_type)
{
case 0:
hypre_sprintf(cycle,"%s","010");
break;
case 1:
default:
hypre_sprintf(cycle,"%s","01210");
break;
case 2:
hypre_sprintf(cycle,"%s","(0+1+2)");
break;
case 3:
hypre_sprintf(cycle,"%s","02120");
break;
case 4:
hypre_sprintf(cycle,"%s","(010+2)");
break;
case 5:
hypre_sprintf(cycle,"%s","0102010");
break;
case 6:
hypre_sprintf(cycle,"%s","(020+1)");
break;
case 7:
hypre_sprintf(cycle,"%s","0201020");
break;
case 8:
hypre_sprintf(cycle,"%s","0(+1+2)0");
break;
case 9:
hypre_sprintf(cycle,"%s","01210");
break;
case 11:
hypre_sprintf(cycle,"%s","013454310");
break;
case 12:
hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
break;
case 13:
hypre_sprintf(cycle,"%s","034515430");
break;
case 14:
hypre_sprintf(cycle,"%s","01(+3+4+5)10");
break;
case 20:
hypre_sprintf(cycle,"%s","020");
break;
}
}
for (i = 0; i < ams_data -> maxit; i++)
{
/* Compute initial residual norms */
if (ams_data -> maxit > 1 && i == 0)
{
hypre_ParVectorCopy(b, ams_data -> r0);
hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
r0_norm = r_norm;
b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
if (b_norm)
relative_resid = r_norm / b_norm;
else
relative_resid = r_norm;
if (my_id == 0 && ams_data -> print_level > 0)
{
hypre_printf(" relative\n");
hypre_printf(" residual factor residual\n");
hypre_printf(" -------- ------ --------\n");
hypre_printf(" Initial %e %e\n",
r_norm, relative_resid);
}
}
/* Apply the preconditioner */
hypre_ParCSRSubspacePrec(ams_data -> A,
ams_data -> A_relax_type,
ams_data -> A_relax_times,
ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
ams_data -> A_relax_weight,
ams_data -> A_omega,
ams_data -> A_max_eig_est,
ams_data -> A_min_eig_est,
ams_data -> A_cheby_order,
ams_data -> A_cheby_fraction,
Ai, Bi, HBi, Pi, ri, gi,
b, x,
ams_data -> r0,
ams_data -> g0,
cycle,
z);
/* Compute new residual norms */
if (ams_data -> maxit > 1)
{
old_resid = r_norm;
hypre_ParVectorCopy(b, ams_data -> r0);
hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
if (b_norm)
relative_resid = r_norm / b_norm;
else
relative_resid = r_norm;
if (my_id == 0 && ams_data -> print_level > 0)
hypre_printf(" Cycle %2d %e %f %e \n",
i+1, r_norm, r_norm / old_resid, relative_resid);
}
if (relative_resid < ams_data -> tol)
{
i++;
break;
}
}
if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
hypre_printf("\n\n Average Convergence Factor = %f\n\n",
pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));
ams_data -> num_iterations = i;
ams_data -> rel_resid_norm = relative_resid;
if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
hypre_error(HYPRE_ERROR_CONV);
if (z)
hypre_ParVectorDestroy(z);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
 * to additive, based on the residual computed at '('.
*--------------------------------------------------------------------------*/
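/* Example (with the subspace ordering used in hypre_AMSSolve): the cycle
   "01210" smooths, corrects in subspace 1, in subspace 2, in subspace 1 again
   and smooths, each step multiplicative (recomputing the residual), whereas
   in "(0+1+2)" the residual is saved at '(', the smoother is applied, and the
   two corrections after the '+' signs both reuse the saved residual, i.e.
   they are additive with respect to the iterate at '('. */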
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
hypre_ParCSRMatrix *A0,
/* relaxation parameters */
HYPRE_Int A0_relax_type,
HYPRE_Int A0_relax_times,
HYPRE_Real *A0_l1_norms,
HYPRE_Real A0_relax_weight,
HYPRE_Real A0_omega,
HYPRE_Real A0_max_eig_est,
HYPRE_Real A0_min_eig_est,
HYPRE_Int A0_cheby_order,
HYPRE_Real A0_cheby_fraction,
/* subspace matrices */
hypre_ParCSRMatrix **A,
/* subspace preconditioners */
HYPRE_Solver *B,
/* hypre solver functions for B */
HYPRE_PtrToSolverFcn *HB,
/* subspace interpolations */
hypre_ParCSRMatrix **P,
/* temporary subspace vectors */
hypre_ParVector **r,
hypre_ParVector **g,
/* right-hand side */
hypre_ParVector *x,
/* current approximation */
hypre_ParVector *y,
/* current residual */
hypre_ParVector *r0,
/* temporary vector */
hypre_ParVector *g0,
char *cycle,
/* temporary vector */
hypre_ParVector *z)
{
char *op;
HYPRE_Int use_saved_residual = 0;
for (op = cycle; *op != '\0'; op++)
{
/* do nothing */
if (*op == ')')
continue;
/* compute the residual: r = x - Ay */
else if (*op == '(')
{
hypre_ParVectorCopy(x,r0);
hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
}
/* switch to additive correction */
else if (*op == '+')
{
use_saved_residual = 1;
continue;
}
/* smooth: y += S (x - Ay) */
else if (*op == '0')
{
hypre_ParCSRRelax(A0, x,
A0_relax_type,
A0_relax_times,
A0_l1_norms,
A0_relax_weight,
A0_omega,
A0_max_eig_est,
A0_min_eig_est,
A0_cheby_order,
A0_cheby_fraction,
y, g0, z);
}
/* subspace correction: y += P B^{-1} P^t r */
else
{
HYPRE_Int i = *op - '1';
if (i < 0)
hypre_error_in_arg(16);
/* skip empty subspaces */
if (!A[i]) continue;
/* compute the residual? */
if (use_saved_residual)
{
use_saved_residual = 0;
hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
}
else
{
hypre_ParVectorCopy(x,g0);
hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
}
hypre_ParVectorSetConstantValues(g[i], 0.0);
(*HB[i]) (B[i], (HYPRE_Matrix)A[i],
(HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
hypre_ParVectorAxpy(1.0, g0, y);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
HYPRE_Int *num_iterations)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
*num_iterations = ams_data -> num_iterations;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
HYPRE_Real *rel_resid_norm)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
*rel_resid_norm = ams_data -> rel_resid_norm;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^T G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
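/* Equivalently, as implemented below: solve (G0^T G0) y = G0^T x approximately
   with the AMG solver B_G0 and update x <- x - G0 y. */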
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> B_G0)
{
hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the edges (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specifically:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
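/* For example, an edge e oriented from vertex v0 to vertex v1 gets the row
   G(e,v0) = -1, G(e,v1) = +1, so that (G p)_e = p(v1) - p(v0) is the discrete
   tangential derivative of a nodal function p along that edge. */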
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
hypre_ParVector *x_coord,
HYPRE_BigInt *edge_vertex,
HYPRE_Int edge_orientation,
hypre_ParCSRMatrix **G_ptr)
{
hypre_ParCSRMatrix *G;
HYPRE_Int nedges;
nedges = hypre_ParCSRMatrixNumRows(A);
/* Construct the local part of G based on edge_vertex and the edge
and vertex partitionings from A and x_coord */
{
HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
HYPRE_Int part_size;
HYPRE_BigInt *row_starts, *col_starts;
HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
hypre_ParVectorGlobalSize(x_coord),
2*nedges);
for (i = 0; i <= nedges; i++)
I[i] = 2*i;
if (edge_orientation == 1)
{
/* Assume that the edges are already oriented */
for (i = 0; i < 2*nedges; i+=2)
{
data[i] = -1.0;
data[i+1] = 1.0;
}
}
else if (edge_orientation == 2)
{
/* Assume that the edge orientation is based on the vertex indexes */
for (i = 0; i < 2*nedges; i+=2)
{
if (edge_vertex[i] < edge_vertex[i+1])
{
data[i] = -1.0;
data[i+1] = 1.0;
}
else
{
data[i] = 1.0;
data[i+1] = -1.0;
}
}
}
else
{
hypre_error_in_arg(4);
}
hypre_CSRMatrixI(local) = I;
hypre_CSRMatrixBigJ(local) = edge_vertex;
hypre_CSRMatrixData(local) = data;
hypre_CSRMatrixRownnz(local) = NULL;
hypre_CSRMatrixOwnsData(local) = 1;
hypre_CSRMatrixNumRownnz(local) = nedges;
/* Copy partitioning from A and x_coord (previously they were re-used) */
part_size = 2;
row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
for (i = 0; i < part_size; i++)
{
row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
}
/* Generate the discrete gradient matrix */
G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParVectorGlobalSize(x_coord),
row_starts, col_starts, 0, 0, 0);
hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
hypre_ParCSRMatrixOwnsColStarts(G) = 1;
hypre_CSRMatrixBigJtoJ(local);
GenerateDiagAndOffd(local, G,
hypre_ParVectorFirstIndex(x_coord),
hypre_ParVectorLastIndex(x_coord));
/* Account for empty rows in G. These may appear when A includes only
the interior (non-Dirichlet b.c.) edges. */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
hypre_CSRMatrixNumCols(G_diag) = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
}
/* Free the local matrix */
hypre_CSRMatrixDestroy(local);
}
*G_ptr = G;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
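/* Layout reminder: edge i connects the locally numbered vertices
   edge_vertex[2*i] and edge_vertex[2*i+1], and the coordinates of vertex i
   are (vert_coord[3*i], vert_coord[3*i+1], vert_coord[3*i+2]). */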
HYPRE_Int hypre_AMSFEISetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x,
HYPRE_Int num_vert,
HYPRE_Int num_local_vert,
HYPRE_BigInt *vert_number,
HYPRE_Real *vert_coord,
HYPRE_Int num_edges,
HYPRE_BigInt *edge_vertex)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int i, j;
hypre_ParCSRMatrix *G;
hypre_ParVector *x_coord, *y_coord, *z_coord;
HYPRE_Real *x_data, *y_data, *z_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt *vert_part, num_global_vert;
HYPRE_BigInt vert_start, vert_end;
HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;
/* Find the processor partitioning of the vertices */
vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
vert_part[0] = vert_part[1] - big_local_vert;
hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
/* Construct hypre parallel vectors for the vertex coordinates */
x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(x_coord);
hypre_ParVectorOwnsData(x_coord) = 1;
hypre_ParVectorOwnsPartitioning(x_coord) = 0;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));
y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(y_coord);
hypre_ParVectorOwnsData(y_coord) = 1;
hypre_ParVectorOwnsPartitioning(y_coord) = 0;
y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));
z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(z_coord);
hypre_ParVectorOwnsData(z_coord) = 1;
hypre_ParVectorOwnsPartitioning(z_coord) = 0;
z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));
vert_start = hypre_ParVectorFirstIndex(x_coord);
vert_end = hypre_ParVectorLastIndex(x_coord);
/* Save coordinates of locally owned vertices */
for (i = 0; i < num_vert; i++)
{
if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
{
j = (HYPRE_Int)(vert_number[i] - vert_start);
x_data[j] = vert_coord[3*i];
y_data[j] = vert_coord[3*i+1];
z_data[j] = vert_coord[3*i+2];
}
}
/* Change vertex numbers from local to global */
for (i = 0; i < 2*num_edges; i++)
edge_vertex[i] = vert_number[edge_vertex[i]];
/* Construct the local part of G based on edge_vertex */
{
/* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
num_global_vert,
2*num_edges);
for (i = 0; i <= num_edges; i++)
I[i] = 2*i;
/* Assume that the edge orientation is based on the vertex indexes */
for (i = 0; i < 2*num_edges; i+=2)
{
data[i] = 1.0;
data[i+1] = -1.0;
}
hypre_CSRMatrixI(local) = I;
hypre_CSRMatrixBigJ(local) = edge_vertex;
hypre_CSRMatrixData(local) = data;
hypre_CSRMatrixRownnz(local) = NULL;
hypre_CSRMatrixOwnsData(local) = 1;
hypre_CSRMatrixNumRownnz(local) = num_edges;
G = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
num_global_vert,
hypre_ParCSRMatrixRowStarts(A),
vert_part,
0, 0, 0);
hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
hypre_ParCSRMatrixOwnsColStarts(G) = 1;
hypre_CSRMatrixBigJtoJ(local);
GenerateDiagAndOffd(local, G, vert_start, vert_end);
//hypre_CSRMatrixJ(local) = NULL;
hypre_CSRMatrixDestroy(local);
}
ams_data -> G = G;
ams_data -> x = x_coord;
ams_data -> y = y_coord;
ams_data -> z = z_coord;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> G)
hypre_ParCSRMatrixDestroy(ams_data -> G);
if (ams_data -> x)
hypre_ParVectorDestroy(ams_data -> x);
if (ams_data -> y)
hypre_ParVectorDestroy(ams_data -> y);
if (ams_data -> z)
hypre_ParVectorDestroy(ams_data -> z);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1NormsThreads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
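/* Small worked example for option 1: a row whose diag and offd entries are
   (4, -1, -2) gets l1_norm = |4| + |-1| + |-2| = 7; when cf_marker is given,
   only entries whose columns share the row's C/F flag are accumulated. */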
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
HYPRE_Int option,
HYPRE_Int num_threads,
HYPRE_Int *cf_marker,
HYPRE_Real **l1_norm_ptr)
{
HYPRE_Int i, j, k;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real diag;
HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
HYPRE_Int ii, ns, ne, rest, size;
HYPRE_Int *cf_marker_offd = NULL;
HYPRE_Int cf_diag;
/* collect the cf marker data from other procs */
if (cf_marker != NULL)
{
HYPRE_Int index;
HYPRE_Int num_sends;
HYPRE_Int start;
HYPRE_Int *int_buf_data = NULL;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
if (num_cols_offd)
cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
int_buf_data = hypre_CTAlloc(HYPRE_Int,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
cf_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
for (k = 0; k < num_threads; k++)
{
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
if (k < rest)
{
ns = k*size+k;
ne = (k+1)*size+k+1;
}
else
{
ns = k*size+rest;
ne = (k+1)*size+rest;
}
if (option == 1)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
if (cf_marker == NULL)
{
/* Add the l1 norm of the diag part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
l1_norm[i] += fabs(A_diag_data[j]);
/* Add the l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += fabs(A_offd_data[j]);
}
}
else
{
cf_diag = cf_marker[i];
/* Add the CF l1 norm of the diag part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
if (cf_diag == cf_marker[A_diag_J[j]])
l1_norm[i] += fabs(A_diag_data[j]);
/* Add the CF l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
if (cf_diag == cf_marker_offd[A_offd_J[j]])
l1_norm[i] += fabs(A_offd_data[j]);
}
}
}
}
else if (option == 2)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
if (cf_marker == NULL)
{
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if (ii == i || ii < ns || ii >= ne)
l1_norm[i] += fabs(A_diag_data[j]);
}
/* Add the l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += fabs(A_offd_data[j]);
}
}
else
{
cf_diag = cf_marker[i];
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if ((ii == i || ii < ns || ii >= ne) &&
(cf_diag == cf_marker[A_diag_J[j]]))
l1_norm[i] += fabs(A_diag_data[j]);
}
/* Add the CF l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
if (cf_diag == cf_marker_offd[A_offd_J[j]])
l1_norm[i] += fabs(A_offd_data[j]);
}
}
}
}
else if (option == 3)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
l1_norm[i] += A_diag_data[j] * A_diag_data[j];
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += A_offd_data[j] * A_offd_data[j];
}
}
else if (option == 4)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
if (cf_marker == NULL)
{
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if (ii == i || ii < ns || ii >= ne)
{
if (ii == i)
{
diag = fabs(A_diag_data[j]);
l1_norm[i] += fabs(A_diag_data[j]);
}
else
l1_norm[i] += 0.5*fabs(A_diag_data[j]);
}
}
/* Add the l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += 0.5*fabs(A_offd_data[j]);
}
}
else
{
cf_diag = cf_marker[i];
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if ((ii == i || ii < ns || ii >= ne) &&
(cf_diag == cf_marker[A_diag_J[j]]))
{
if (ii == i)
{
diag = fabs(A_diag_data[j]);
l1_norm[i] += fabs(A_diag_data[j]);
}
else
l1_norm[i] += 0.5*fabs(A_diag_data[j]);
}
}
/* Add the CF l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
if (cf_diag == cf_marker_offd[A_offd_J[j]])
l1_norm[i] += 0.5*fabs(A_offd_data[j]);
}
}
/* Truncate according to Remark 6.2 */
if (l1_norm[i] <= 4.0/3.0*diag)
l1_norm[i] = diag;
}
}
else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
{
/* Set the diag element */
for (i = ns; i < ne; i++)
{
l1_norm[i] = A_diag_data[A_diag_I[i]];
if (l1_norm[i] == 0) l1_norm[i] = 1.0;
}
}
if (option < 5)
{
/* Handle negative definite matrices */
for (i = ns; i < ne; i++)
if (A_diag_data[A_diag_I[i]] < 0)
l1_norm[i] = -l1_norm[i];
for (i = ns; i < ne; i++)
/* if (fabs(l1_norm[i]) < DBL_EPSILON) */
if (fabs(l1_norm[i]) == 0.0)
{
hypre_error_in_arg(1);
break;
}
}
}
hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);
*l1_norm_ptr = l1_norm;
return hypre_error_flag;
}
|
ompfor5.c
|
/*
* test decremental loop iteration space
* Liao 9/22/2009
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
void foo(int iend, int ist)
{
int i;
#pragma omp parallel
{
#pragma omp single
printf ("Using %d threads.\n",omp_get_num_threads());
#pragma omp for nowait schedule(static)
for (i=iend;i>=ist;i--)
{
printf("Iteration %d is carried out by thread %d\n",i, omp_get_thread_num());
}
}
}
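/* A possible driver (illustrative assumption, not part of the original test):
      int main(void) { foo(10, 1); return 0; }
   which runs the decremental loop from i = 10 down to i = 1. */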
|
main.c
|
/*======================================*/
/*= Author: Tiago Serique Valadares =*/
/*= GRR: 20195138 =*/
/*= Course: Machine Learning =*/
/*======================================*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "knn.h"
#include "read_data.h"
int main(int argc, char *argv[]){
char train_base_file_name[LINESIZE];
char test_base_file_name[LINESIZE];
int k = 0;
int n_lines_test = 0;
int n_features = 0;
int n_lines_train = 0;
int n_classes = 0;
int **confusion_matrix = NULL;
Data *train_data_array = NULL;
Data *test_data_array = NULL;
FILE* train_base_file = NULL;
FILE* test_base_file = NULL;
if ( argc < 4 ){
printf("Input format:\n");
printf("knn <training base file> <test base file> <k value>\n");
return EXIT_FAILURE;
}
strcpy(train_base_file_name, argv[1]);
strcpy(test_base_file_name, argv[2]);
k = atoi(argv[3]);
// open the train base file
train_base_file = fopen(train_base_file_name, "r");
if ( train_base_file == NULL ){
printf("Not able to open the train base file\n");
return EXIT_FAILURE;
}
train_data_array = readData(train_base_file, &n_lines_train,
&n_features, &n_classes);
fclose(train_base_file);
// open the test base file
test_base_file = fopen(test_base_file_name, "r");
if ( test_base_file == NULL ){
printf("Not able to open the test base file\n");
return EXIT_FAILURE;
}
test_data_array = readData(test_base_file, &n_lines_test, &n_features, &n_classes);
fclose(test_base_file);
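// the confusion matrix is allocated as a single block: n_classes row pointers
// followed immediately by the n_classes x n_classes integer payload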
confusion_matrix = (int **)malloc(sizeof(int *) * n_classes +
n_classes * n_classes * sizeof(int));
if ( confusion_matrix == NULL ){
printf("Not able to allocate the confusion matrix\n");
return EXIT_FAILURE;
}
confusion_matrix[0] = (int *)(confusion_matrix + n_classes);
#pragma omp parallel for
for (int i = 1; i < n_classes; i++)
confusion_matrix[i] = confusion_matrix[0] + (i * n_classes);
#pragma omp parallel for
for (int i = 0; i < n_classes; i++)
for (int j = 0; j < n_classes; j++)
confusion_matrix[i][j] = 0;
knn(confusion_matrix, train_data_array, test_data_array, k, n_lines_train,
n_lines_test, n_features, n_classes);
printConfusionMatrix(confusion_matrix, n_classes);
calculateAccuracy(confusion_matrix, n_classes);
free(train_data_array);
free(test_data_array);
free(confusion_matrix);
return EXIT_SUCCESS;
}
|
GB_binop__bshift_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_int64
// A.*B function (eWiseMult): GB_AemultB__bshift_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_int64
// C+=b function (dense accum): GB_Cdense_accumb__bshift_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int64
// C=scalar+B GB_bind1st__bshift_int64
// C=scalar+B' GB_bind1st_tran__bshift_int64
// C=A+scalar GB_bind2nd__bshift_int64
// C=A'+scalar GB_bind2nd_tran__bshift_int64
// C type: int64_t
// A type: int64_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int64 (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_bitshift_int64 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bshift_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bshift_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = GB_bitshift_int64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bshift_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
Cx [p] = GB_bitshift_int64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int64 (x, aij) ; \
}
GrB_Info GB_bind1st_tran__bshift_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int64 (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c_timers.c
|
#include "wtime.h"
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "hooks_base.h"
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time( void )
{
double t;
#if defined(_OPENMP) && (_OPENMP > 200010)
/* Use the OpenMP timer if we can */
t = omp_get_wtime();
#else
wtime( &t );
#endif
return( t );
}
static double start[64], elapsed[64];
#ifdef _OPENMP
#pragma omp threadprivate(start, elapsed)
#endif
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop( int n )
{
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read( int n )
{
return( elapsed[n] );
}
|
GB_binop__islt_uint16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__islt_uint16
// A.*B function (eWiseMult): GB_AemultB__islt_uint16
// A*D function (colscale): GB_AxD__islt_uint16
// D*A function (rowscale): GB_DxB__islt_uint16
// C+=B function (dense accum): GB_Cdense_accumB__islt_uint16
// C+=b function (dense accum): GB_Cdense_accumb__islt_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_uint16
// C=scalar+B GB_bind1st__islt_uint16
// C=scalar+B' GB_bind1st_tran__islt_uint16
// C=A+scalar GB_bind2nd__islt_uint16
// C=A'+scalar GB_bind2nd_tran__islt_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__islt_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__islt_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__islt_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB_bind1st_tran__islt_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB_bind2nd_tran__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.lbpar.c
|
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
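/*
 * The generated nest below walks a time-tiled, skewed iteration space:
 * t1..t4 enumerate the tiles, t5 is the actual time step (note the
 * A[(t5+1)%2] / A[t5%2] double buffering), and t6/t7/t8 are the shifted
 * z/y/x point coordinates. The t2 tile loop is parallelized with OpenMP and
 * the innermost t8 loop is vectorized via the ivdep/vector pragmas.
 */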
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) {
for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(32*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(32*t3+Nx+19,32));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),8*t4+6);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
UpdateCombinedNeighboursWorklet.h
|
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_contourtree_mesh_inc_update_combined_neighbours_worklet_h
#define vtk_m_worklet_contourtree_augmented_contourtree_mesh_inc_update_combined_neighbours_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_dem_contourtree_mesh_inc
{
class UpdateCombinedNeighboursWorklet : public vtkm::worklet::WorkletMapField
{
public:
typedef void ControlSignature(
WholeArrayIn firstNeighbour, // (input) this->firstNeighbour or other.firstNeighbour
WholeArrayIn neighbours, // (input) this->neighbours or other.neighbours array
WholeArrayIn
toCombinedSortOrder, // (input) thisToCombinedSortOrder or otherToCombinedSortOrder array
WholeArrayIn combinedFirstNeighbour, // (input) combinedFirstNeighbour array in both cases
WholeArrayIn
combinedOtherStartIndex, // (input) array of zeros when processing 'this' and combinedOtherStartIndex when processing 'other'
WholeArrayOut combinedNeighbours); // (output) combinedNeighbours array in both cases
typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6);
typedef _1 InputDomain;
// Default Constructor
VTKM_EXEC_CONT
UpdateCombinedNeighboursWorklet() {}
template <typename InFieldPortalType, typename InFieldPortalType2, typename OutFieldPortalType>
VTKM_EXEC void operator()(
const InFieldPortalType& firstNeighbourPortal,
const vtkm::Id vtx,
const InFieldPortalType& neighboursPortal,
const InFieldPortalType& toCombinedSortOrderPortal,
const InFieldPortalType& combinedFirstNeighbourPortal,
const InFieldPortalType2&
combinedOtherStartIndexPortal, // We need another InFieldPortalType here to allow us to hand in a smart array handle instead of a VTKM array
const OutFieldPortalType& combinedNeighboursPortal) const
{
vtkm::Id totalNumNeighbours = neighboursPortal.GetNumberOfValues();
vtkm::Id totalNumVertices = firstNeighbourPortal.GetNumberOfValues();
vtkm::Id numNeighbours = (vtx < totalNumVertices - 1)
? firstNeighbourPortal.Get(vtx + 1) - firstNeighbourPortal.Get(vtx)
: totalNumNeighbours - firstNeighbourPortal.Get(vtx);
for (vtkm::Id nbrNo = 0; nbrNo < numNeighbours; ++nbrNo)
{
combinedNeighboursPortal.Set(
combinedFirstNeighbourPortal.Get(toCombinedSortOrderPortal.Get(vtx)) +
combinedOtherStartIndexPortal.Get(toCombinedSortOrderPortal.Get(vtx)) + nbrNo,
toCombinedSortOrderPortal.Get(neighboursPortal.Get(firstNeighbourPortal.Get(vtx) + nbrNo)));
}
/*
This worklet implements the following two loops from the original OpenMP code.
The two loops are the same but the arrays required are different
#pragma omp parallel for
for (indexVector::size_type vtx = 0; vtx < firstNeighbour.size(); ++vtx)
{
indexType numNeighbours = (vtx < GetNumberOfVertices() - 1) ? firstNeighbour[vtx+1] - firstNeighbour[vtx] : neighbours.size() - firstNeighbour[vtx];
for (indexType nbrNo = 0; nbrNo < numNeighbours; ++nbrNo)
{
combinedNeighbours[combinedFirstNeighbour[thisToCombinedSortOrder[vtx]] + nbrNo] = thisToCombinedSortOrder[neighbours[firstNeighbour[vtx] + nbrNo]];
}
}
#pragma omp parallel for
for (indexVector::size_type vtx = 0; vtx < other.firstNeighbour.size(); ++vtx)
{
indexType numNeighbours = (vtx < other.GetNumberOfVertices() - 1) ? other.firstNeighbour[vtx+1] - other.firstNeighbour[vtx] : other.neighbours.size() - other.firstNeighbour[vtx];
for (indexType nbrNo = 0; nbrNo < numNeighbours; ++nbrNo)
{
combinedNeighbours[combinedFirstNeighbour[otherToCombinedSortOrder[vtx]] + combinedOtherStartIndex[otherToCombinedSortOrder[vtx]] + nbrNo] = otherToCombinedSortOrder[other.neighbours[other.firstNeighbour[vtx] + nbrNo]];
}
}
*/
}
}; // UpdateCombinedNeighboursWorklet
} // namespace mesh_dem_contourtree_mesh_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif
|
openMP.c
|
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int someMath(int num,int i){
num = num * num * num * i;
return num;
}
int main(int argc, char** argv){
int nthreads, tid;
int i;
int num = atoi(argv[1]);
int data[10000];
#pragma omp parallel
{
#pragma omp for
for(i=0; i<10000; i++){
data[i] = someMath(num,i);
}
}
FILE *fp;
fp = fopen("data.csv","w+");
for(i=0; i<10000; i++){
fprintf(fp, "%d,",data[i]);
}
fprintf(fp, "\n");
for(i=0; i<10000; i++){
fprintf(fp, "%d,",data[i]);
}
fprintf(fp, "\n");
fclose(fp);
}
|
repair.c
|
#include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <stdio.h>
/*
* NOTE: The repair phase is essentially a mesh-wide scattering stencil.
* Essentially the whole stencil needs to be owned by a single thread to stop
* data races...
*
* One method that could be employed here is to essentially break the problem
* down and analyse the dependencies at runtime.
*
* Steps:
*
* 1) determine the quantities needed to repair extrema
* 2) check the 2 deep stencil of each node/cell to check if we actually have a
* dependency.
* 3) construct an indirection with all independent work and one of the
* dependent elements from each chain
* 4) perform all of the work on that indirection in parallel
* 5) construct another list that contains another single item of the work that
* was considered dependent
* 6) perform all of the individual dependent element's work
* 7) repeat 5 and 6 until completion.
*/
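/*
 * Illustrative sketch only (not part of hale): a minimal shape for steps 2-7
 * above, assuming hypothetical helpers is_independent() and process_element()
 * supplied by the caller. Elements whose two-deep stencil is private can be
 * scattered in parallel; the remaining dependent elements are drained
 * serially here, standing in for the repeated one-element-per-chain passes
 * described in the note.
 */
static void repair_with_runtime_dependency_analysis(const int nelements,
                                                    int (*is_independent)(int),
                                                    void (*process_element)(int)) {
#pragma omp parallel for
  for (int ee = 0; ee < nelements; ++ee) {
    // Step 4: independent work is race-free and runs fully in parallel
    if (is_independent(ee)) {
      process_element(ee);
    }
  }
  for (int ee = 0; ee < nelements; ++ee) {
    // Steps 5-7: dependent work, handled without races (single serial pass)
    if (!is_independent(ee)) {
      process_element(ee);
    }
  }
}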
// Repairs the subcell extrema for mass
void repair_subcell_extrema(const int ncells, const int* cells_to_nodes_offsets,
const int* subcells_to_subcells_offsets,
const int* subcells_to_subcells,
double* subcell_volume, double* subcell_mass);
// Repairs the extrema at the nodal velocities
void repair_velocity_extrema(const int nnodes,
const int* nodes_to_nodes_offsets,
const int* nodes_to_nodes, double* velocity_x,
double* velocity_y, double* velocity_z);
// Repairs the cell energy extrema
void repair_energy_extrema(const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_faces,
const int* faces_to_cells0,
const int* faces_to_cells1, double* energy);
// Redistributes the mass according to the determined neighbour availability
void redistribute_subcell_mass(double* mass, const int subcell_index,
const int nsubcell_neighbours,
const int* subcells_to_subcells,
const int subcell_to_subcells_off,
const double* dmass_avail_neighbour,
const double dmass_avail,
const double dmass_need, const double g,
const double subcell_vol, const int is_min);
// Performs a conservative repair of the mesh
void mass_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
// Repairs the subcell mass extrema
repair_subcell_extrema(umesh->ncells, umesh->cells_to_nodes_offsets,
hale_data->subcells_to_subcells_offsets,
hale_data->subcells_to_subcells,
hale_data->subcell_volume, hale_data->subcell_mass);
}
// Repairs the nodal velocities
void velocity_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
repair_velocity_extrema(umesh->nnodes, umesh->nodes_to_nodes_offsets,
umesh->nodes_to_nodes, hale_data->velocity_x0,
hale_data->velocity_y0, hale_data->velocity_z0);
}
// Repairs the energy
void energy_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
repair_energy_extrema(umesh->ncells, umesh->cells_to_faces_offsets,
umesh->cells_to_faces, umesh->faces_to_cells0,
umesh->faces_to_cells1, hale_data->energy0);
}
// Repairs the extrema at the nodal velocities
void repair_velocity_extrema(const int nnodes,
const int* nodes_to_nodes_offsets,
const int* nodes_to_nodes, double* velocity_x,
double* velocity_y, double* velocity_z) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int node_to_nodes_off = nodes_to_nodes_offsets[(nn)];
const int nnodes_by_node =
nodes_to_nodes_offsets[(nn + 1)] - node_to_nodes_off;
double gmax_vx = -DBL_MAX;
double gmin_vx = DBL_MAX;
double gmax_vy = -DBL_MAX;
double gmin_vy = DBL_MAX;
double gmax_vz = -DBL_MAX;
double gmin_vz = DBL_MAX;
double dvx_total_avail_donate = 0.0;
double dvx_total_avail_receive = 0.0;
double dvy_total_avail_donate = 0.0;
double dvy_total_avail_receive = 0.0;
double dvz_total_avail_donate = 0.0;
double dvz_total_avail_receive = 0.0;
double dvx_avail_donate_neighbour[(nnodes_by_node)];
double dvx_avail_receive_neighbour[(nnodes_by_node)];
double dvy_avail_donate_neighbour[(nnodes_by_node)];
double dvy_avail_receive_neighbour[(nnodes_by_node)];
double dvz_avail_donate_neighbour[(nnodes_by_node)];
double dvz_avail_receive_neighbour[(nnodes_by_node)];
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
const int neighbour_to_nodes_off =
nodes_to_nodes_offsets[(neighbour_index)];
const int nnodes_by_neighbour =
nodes_to_nodes_offsets[(neighbour_index + 1)] -
neighbour_to_nodes_off;
vec_t neighbour_v = {velocity_x[(neighbour_index)],
velocity_y[(neighbour_index)],
velocity_z[(neighbour_index)]};
double neighbour_gmax_vx = -DBL_MAX;
double neighbour_gmin_vx = DBL_MAX;
double neighbour_gmax_vy = -DBL_MAX;
double neighbour_gmin_vy = DBL_MAX;
double neighbour_gmax_vz = -DBL_MAX;
double neighbour_gmin_vz = DBL_MAX;
for (int nn3 = 0; nn3 < nnodes_by_neighbour; ++nn3) {
const int neighbour_neighbour_index =
nodes_to_nodes[(neighbour_to_nodes_off + nn3)];
if (neighbour_neighbour_index == -1) {
continue;
}
neighbour_gmax_vx =
max(neighbour_gmax_vx, velocity_x[(neighbour_neighbour_index)]);
neighbour_gmin_vx =
min(neighbour_gmin_vx, velocity_x[(neighbour_neighbour_index)]);
neighbour_gmax_vy =
max(neighbour_gmax_vy, velocity_y[(neighbour_neighbour_index)]);
neighbour_gmin_vy =
min(neighbour_gmin_vy, velocity_y[(neighbour_neighbour_index)]);
neighbour_gmax_vz =
max(neighbour_gmax_vz, velocity_z[(neighbour_neighbour_index)]);
neighbour_gmin_vz =
min(neighbour_gmin_vz, velocity_z[(neighbour_neighbour_index)]);
}
dvx_avail_donate_neighbour[(nn2)] =
max(neighbour_v.x - neighbour_gmin_vx, 0.0);
dvx_avail_receive_neighbour[(nn2)] =
max(neighbour_gmax_vx - neighbour_v.x, 0.0);
dvy_avail_donate_neighbour[(nn2)] =
max(neighbour_v.y - neighbour_gmin_vy, 0.0);
dvy_avail_receive_neighbour[(nn2)] =
max(neighbour_gmax_vy - neighbour_v.y, 0.0);
dvz_avail_donate_neighbour[(nn2)] =
max(neighbour_v.z - neighbour_gmin_vz, 0.0);
dvz_avail_receive_neighbour[(nn2)] =
max(neighbour_gmax_vz - neighbour_v.z, 0.0);
dvx_total_avail_donate += dvx_avail_donate_neighbour[(nn2)];
dvx_total_avail_receive += dvx_avail_receive_neighbour[(nn2)];
dvy_total_avail_donate += dvy_avail_donate_neighbour[(nn2)];
dvy_total_avail_receive += dvy_avail_receive_neighbour[(nn2)];
dvz_total_avail_donate += dvz_avail_donate_neighbour[(nn2)];
dvz_total_avail_receive += dvz_avail_receive_neighbour[(nn2)];
gmax_vx = max(gmax_vx, neighbour_v.x);
gmin_vx = min(gmin_vx, neighbour_v.x);
gmax_vy = max(gmax_vy, neighbour_v.y);
gmin_vy = min(gmin_vy, neighbour_v.y);
gmax_vz = max(gmax_vz, neighbour_v.z);
gmin_vz = min(gmin_vz, neighbour_v.z);
}
vec_t cell_v = {velocity_x[(nn)], velocity_y[(nn)], velocity_z[(nn)]};
const double dvx_need_receive = gmin_vx - cell_v.x;
const double dvx_need_donate = cell_v.x - gmax_vx;
const double dvy_need_receive = gmin_vy - cell_v.y;
const double dvy_need_donate = cell_v.y - gmax_vy;
const double dvz_need_receive = gmin_vz - cell_v.z;
const double dvz_need_donate = cell_v.z - gmax_vz;
if (dvx_need_receive > 0.0) {
velocity_x[(nn)] = gmin_vx;
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_x[(neighbour_index)] -=
(dvx_avail_donate_neighbour[(nn2)] / dvx_total_avail_donate) *
dvx_need_receive;
}
} else if (dvx_need_donate > 0.0) {
// Loop over the nodes attached to this node
velocity_x[(nn)] = gmax_vx;
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_x[(neighbour_index)] +=
(dvx_avail_receive_neighbour[(nn2)] / dvx_total_avail_receive) *
dvx_need_donate;
}
}
if (dvy_need_receive > 0.0) {
velocity_y[(nn)] = gmin_vy;
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_y[(neighbour_index)] -=
(dvy_avail_donate_neighbour[(nn2)] / dvy_total_avail_donate) *
dvy_need_receive;
}
} else if (dvy_need_donate > 0.0) {
// Loop over the nodes attached to this node
velocity_y[(nn)] = gmax_vy;
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_y[(neighbour_index)] +=
(dvy_avail_receive_neighbour[(nn2)] / dvy_total_avail_receive) *
dvy_need_donate;
}
}
if (dvz_need_receive > 0.0) {
velocity_z[(nn)] = gmin_vz;
// Loop over the nodes attached to this node
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_z[(neighbour_index)] -=
(dvz_avail_donate_neighbour[(nn2)] / dvz_total_avail_donate) *
dvz_need_receive;
}
} else if (dvz_need_donate > 0.0) {
// Loop over the nodes attached to this node
velocity_z[(nn)] = gmax_vz;
for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
if (neighbour_index == -1) {
continue;
}
velocity_z[(neighbour_index)] +=
(dvz_avail_receive_neighbour[(nn2)] / dvz_total_avail_receive) *
dvz_need_donate;
}
}
if (dvx_total_avail_donate < dvx_need_receive ||
dvx_total_avail_receive < dvx_need_donate ||
dvy_total_avail_donate < dvy_need_receive ||
dvy_total_avail_receive < dvy_need_donate ||
dvz_total_avail_donate < dvz_need_receive ||
dvz_total_avail_receive < dvz_need_donate) {
printf("Repair stage needs additional level.\n");
continue;
}
}
}
// Repairs the cell energy extrema
void repair_energy_extrema(const int ncells, const int* cells_to_faces_offsets,
const int* cells_to_faces,
const int* faces_to_cells0,
const int* faces_to_cells1, double* energy) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
double gmax_ie = -DBL_MAX;
double gmin_ie = DBL_MAX;
double die_total_avail_donate = 0.0;
double die_total_avail_receive = 0.0;
double die_avail_donate_neighbour[(nfaces_by_cell)];
double die_avail_receive_neighbour[(nfaces_by_cell)];
const double cell_ie = energy[(cc)];
// Loop over the nodes attached to this node
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
if (neighbour_index == -1) {
continue;
}
const double neighbour_ie = energy[(neighbour_index)];
double neighbour_gmax_ie = -DBL_MAX;
double neighbour_gmin_ie = DBL_MAX;
const int neighbour_to_faces_off =
cells_to_faces_offsets[(neighbour_index)];
const int nfaces_by_neighbour =
cells_to_faces_offsets[(neighbour_index + 1)] -
neighbour_to_faces_off;
for (int ff2 = 0; ff2 < nfaces_by_neighbour; ++ff2) {
const int neighbour_face_index =
cells_to_faces[(neighbour_to_faces_off + ff2)];
const int neighbour_neighbour_index =
(faces_to_cells0[(neighbour_face_index)] == neighbour_index)
? faces_to_cells1[(neighbour_face_index)]
: faces_to_cells0[(neighbour_face_index)];
if (neighbour_neighbour_index == -1) {
continue;
}
neighbour_gmax_ie =
max(neighbour_gmax_ie, energy[(neighbour_neighbour_index)]);
neighbour_gmin_ie =
min(neighbour_gmin_ie, energy[(neighbour_neighbour_index)]);
}
die_avail_donate_neighbour[(ff)] =
max(neighbour_ie - neighbour_gmin_ie, 0.0);
die_avail_receive_neighbour[(ff)] =
max(neighbour_gmax_ie - neighbour_ie, 0.0);
die_total_avail_donate += die_avail_donate_neighbour[(ff)];
die_total_avail_receive += die_avail_receive_neighbour[(ff)];
gmax_ie = max(gmax_ie, neighbour_ie);
gmin_ie = min(gmin_ie, neighbour_ie);
}
const double die_need_receive = gmin_ie - cell_ie;
const double die_need_donate = cell_ie - gmax_ie;
if (die_need_receive > 0.0) {
energy[(cc)] = gmin_ie;
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
if (neighbour_index == -1) {
continue;
}
energy[(neighbour_index)] -=
(die_avail_donate_neighbour[(ff)] / die_total_avail_donate) *
die_need_receive;
}
} else if (die_need_donate > 0.0) {
// Loop over the nodes attached to this node
energy[(cc)] = gmax_ie;
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
if (neighbour_index == -1) {
continue;
}
energy[(neighbour_index)] +=
(die_avail_receive_neighbour[(ff)] / die_total_avail_receive) *
die_need_donate;
}
}
if (die_total_avail_donate < die_need_receive ||
die_total_avail_receive < die_need_donate) {
printf("Repair stage needs additional level.\n");
continue;
}
}
}
// Repairs the subcell extrema for mass
void repair_subcell_extrema(const int ncells, const int* cells_to_nodes_offsets,
const int* subcells_to_subcells_offsets,
const int* subcells_to_subcells,
double* subcell_volume, double* subcell_mass) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
// Looping over corner subcells here
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int subcell_index = cell_to_nodes_off + nn;
const int subcell_to_subcells_off =
subcells_to_subcells_offsets[(subcell_index)];
const int nsubcell_neighbours =
subcells_to_subcells_offsets[(subcell_index + 1)] -
subcell_to_subcells_off;
const double subcell_vol = subcell_volume[(subcell_index)];
const double subcell_m_density =
subcell_mass[(subcell_index)] / subcell_vol;
double gmax_m = -DBL_MAX;
double gmin_m = DBL_MAX;
double dm_avail_donate = 0.0;
double dm_avail_receive = 0.0;
double dm_avail_donate_neighbour[(nsubcell_neighbours)];
double dm_avail_receive_neighbour[(nsubcell_neighbours)];
// Loop over neighbours
for (int ss = 0; ss < nsubcell_neighbours; ++ss) {
const int neighbour_index =
subcells_to_subcells[(subcell_to_subcells_off + ss)];
// Ignore boundary neighbours
if (neighbour_index == -1) {
continue;
}
const int neighbour_to_subcells_off =
subcells_to_subcells_offsets[(neighbour_index)];
const int nneighbour_neighbours =
subcells_to_subcells_offsets[(neighbour_index + 1)] -
neighbour_to_subcells_off;
const double neighbour_vol = subcell_volume[(neighbour_index)];
const double neighbour_m_density =
subcell_mass[(neighbour_index)] / neighbour_vol;
double neighbour_gmax_m = -DBL_MAX;
double neighbour_gmin_m = DBL_MAX;
// Loop over neighbour's neighbours
for (int ss2 = 0; ss2 < nneighbour_neighbours; ++ss2) {
const int neighbour_neighbour_index =
subcells_to_subcells[(neighbour_to_subcells_off + ss2)];
// Ignore boundary neighbours
if (neighbour_neighbour_index == -1) {
continue;
}
const double neighbour_neighbour_vol =
subcell_volume[(neighbour_neighbour_index)];
const double neighbour_neighbour_m_density =
subcell_mass[(neighbour_neighbour_index)] /
neighbour_neighbour_vol;
// Store the maximum / minimum values for rho in the neighbourhood
neighbour_gmax_m =
max(neighbour_gmax_m, neighbour_neighbour_m_density);
neighbour_gmin_m =
min(neighbour_gmin_m, neighbour_neighbour_m_density);
}
dm_avail_donate_neighbour[(ss)] =
max((neighbour_m_density - neighbour_gmin_m) * subcell_vol, 0.0);
dm_avail_receive_neighbour[(ss)] =
max((neighbour_gmax_m - neighbour_m_density) * subcell_vol, 0.0);
dm_avail_donate += dm_avail_donate_neighbour[(ss)];
dm_avail_receive += dm_avail_receive_neighbour[(ss)];
gmax_m = max(gmax_m, neighbour_m_density);
gmin_m = min(gmin_m, neighbour_m_density);
}
const double dm_need_receive = (gmin_m - subcell_m_density) * subcell_vol;
const double dm_need_donate = (subcell_m_density - gmax_m) * subcell_vol;
if (dm_need_receive > 0.0) {
redistribute_subcell_mass(subcell_mass, subcell_index,
nsubcell_neighbours, subcells_to_subcells,
subcell_to_subcells_off,
dm_avail_donate_neighbour, dm_avail_donate,
dm_need_receive, gmin_m, subcell_vol, 1);
} else if (dm_need_donate > 0.0) {
redistribute_subcell_mass(subcell_mass, subcell_index,
nsubcell_neighbours, subcells_to_subcells,
subcell_to_subcells_off,
dm_avail_receive_neighbour, dm_avail_receive,
dm_need_donate, gmax_m, subcell_vol, 0);
}
if (dm_avail_donate < dm_need_receive ||
dm_avail_receive < dm_need_donate) {
printf("dm_avail_donate %.12e dm_need_receive %.12e dm_avail_receive "
"%.12e dm_need_donate %.12e\n",
dm_avail_donate, dm_need_receive, dm_avail_receive,
dm_need_donate);
printf("Repair stage needs additional level.\n");
continue;
}
}
}
}
// Redistributes the mass according to the determined neighbour availability
void redistribute_subcell_mass(double* mass, const int subcell_index,
const int nsubcell_neighbours,
const int* subcells_to_subcells,
const int subcell_to_subcells_off,
const double* dmass_avail_neighbour,
const double dmass_avail,
const double dmass_need, const double g,
const double subcell_vol, const int is_min) {
mass[(subcell_index)] = g * subcell_vol;
// Loop over neighbours
for (int ss = 0; ss < nsubcell_neighbours; ++ss) {
const int neighbour_index =
subcells_to_subcells[(subcell_to_subcells_off + ss)];
mass[(neighbour_index)] += (is_min ? -1.0 : 1.0) *
(dmass_avail_neighbour[(ss)] / dmass_avail) *
dmass_need;
}
}
|
eavlSimpleReverseIndexOp.h
|
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_SIMPLE_REVERSE_INDEX_OP_H
#define EAVL_SIMPLE_REVERSE_INDEX_OP_H
#include "eavlOperation.h"
#include "eavlArray.h"
#include "eavlException.h"
/// Like the general reverse-index op, but assumes each output count can
/// only ever be "1", so the output-count array is treated as a simple
/// boolean flag and no reverse sub-index array needs to be generated.
static void eavlSimpleReverseIndexOp_CPU(int nInputVals,
int *inOF, int inOFdiv, int inOFmod, int inOFmul, int inOFadd,
int *inOI, int inOIdiv, int inOImod, int inOImul, int inOIadd,
int *outII, int outIImul, int outIIadd)
{
#pragma omp parallel for
for (int i=0; i<nInputVals; i++)
{
int outflag = inOF[((i/inOFdiv)%inOFmod)*inOFmul+inOFadd];
int outindex = inOI[((i/inOIdiv)%inOImod)*inOImul+inOIadd];
if (outflag)
outII[outindex*outIImul+outIIadd] = i;
}
}
#if defined __CUDACC__
__global__ static void eavlSimpleReverseIndexOp_kernel(int nInputVals,
int *inOF, int inOFdiv, int inOFmod, int inOFmul, int inOFadd,
int *inOI, int inOIdiv, int inOImod, int inOImul, int inOIadd,
int *outII, int outIImul, int outIIadd)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = threadID; index < nInputVals; index += numThreads)
{
int outflag = inOF[((index/inOFdiv)%inOFmod)*inOFmul+inOFadd];
int outindex = inOI[((index/inOIdiv)%inOImod)*inOImul+inOIadd];
if (outflag)
outII[outindex*outIImul+outIIadd] = index;
}
}
static void eavlSimpleReverseIndexOp_GPU(int nInputVals,
int *d_inOF, int inOFdiv, int inOFmod, int inOFmul, int inOFadd,
int *d_inOI, int inOIdiv, int inOImod, int inOImul, int inOIadd,
int *d_outII, int outIImul, int outIIadd)
{
int numBlocks = 32;
int numThreads = 256;
eavlSimpleReverseIndexOp_kernel<<<numBlocks, numThreads>>>
(nInputVals,
d_inOF, inOFdiv, inOFmod, inOFmul, inOFadd,
d_inOI, inOIdiv, inOImod, inOImul, inOIadd,
d_outII, outIImul, outIIadd);
CUDA_CHECK_ERROR();
}
#endif
// ****************************************************************************
// Class: eavlSimpleReverseIndexOp
//
// Purpose:
/// Given an input array of booleans, and an input array of output starting
/// indices (usually created by the caller using an exclusive scan of the
/// first array), generate an output array containing a map back to the
/// input index.
///
/// For example, if inOutputFlag is [0 1 1 0 1 0],
/// and inOutputIndex is thus [0 0 1 2 2 3], then
/// the result in outInputIndex will be [1 2 4] (i.e. the list of
/// indices from the input array which were set to 1).
//
// Programmer: Jeremy Meredith
// Creation: March 3, 2012
//
// Modifications:
// ****************************************************************************
class eavlSimpleReverseIndexOp : public eavlOperation
{
protected:
eavlArrayWithLinearIndex inOutputFlag;
eavlArrayWithLinearIndex inOutputIndex;
eavlArrayWithLinearIndex outInputIndex;
public:
eavlSimpleReverseIndexOp(eavlArrayWithLinearIndex inOutputFlag_,
eavlArrayWithLinearIndex inOutputIndex_,
eavlArrayWithLinearIndex outInputIndex_)
: inOutputFlag(inOutputFlag_),
inOutputIndex(inOutputIndex_),
outInputIndex(outInputIndex_)
{
}
virtual void GoCPU()
{
int n = inOutputFlag.array->GetNumberOfTuples();
eavlIntArray *inOF = dynamic_cast<eavlIntArray*>(inOutputFlag.array);
eavlIntArray *inOI = dynamic_cast<eavlIntArray*>(inOutputIndex.array);
eavlIntArray *outII = dynamic_cast<eavlIntArray*>(outInputIndex.array);
if (!inOF || !inOI || !outII)
THROW(eavlException,"eavlSimpleReverseIndexOp expects all integer arrays.");
eavlSimpleReverseIndexOp_CPU(n,
(int*)inOF->GetHostArray(), inOutputFlag.div, inOutputFlag.mod, inOutputFlag.mul, inOutputFlag.add,
(int*)inOI->GetHostArray(), inOutputIndex.div, inOutputIndex.mod, inOutputIndex.mul, inOutputIndex.add,
(int*)outII->GetHostArray(), outInputIndex.mul, outInputIndex.add);
}
virtual void GoGPU()
{
#if defined __CUDACC__
int n = inOutputFlag.array->GetNumberOfTuples();
eavlIntArray *inOF = dynamic_cast<eavlIntArray*>(inOutputFlag.array);
eavlIntArray *inOI = dynamic_cast<eavlIntArray*>(inOutputIndex.array);
eavlIntArray *outII = dynamic_cast<eavlIntArray*>(outInputIndex.array);
if (!inOF || !inOI || !outII)
THROW(eavlException,"eavlSimpleReverseIndexOp expects all integer arrays.");
eavlSimpleReverseIndexOp_GPU(n,
(int*)inOF->GetCUDAArray(), inOutputFlag.div, inOutputFlag.mod, inOutputFlag.mul, inOutputFlag.add,
(int*)inOI->GetCUDAArray(), inOutputIndex.div, inOutputIndex.mod, inOutputIndex.mul, inOutputIndex.add,
(int*)outII->GetCUDAArray(), outInputIndex.mul, outInputIndex.add);
#else
THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
}
};
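// A minimal CPU usage sketch (hedged: the array names are hypothetical and this
// assumes eavlArrayWithLinearIndex can be constructed directly from the
// corresponding eavlIntArray pointers; inOutputIndex must hold the exclusive
// scan of inOutputFlag, computed by the caller):
//
//   eavlSimpleReverseIndexOp op(eavlArrayWithLinearIndex(inOutputFlag),
//                               eavlArrayWithLinearIndex(inOutputIndex),
//                               eavlArrayWithLinearIndex(outInputIndex));
//   op.GoCPU();   // or op.GoGPU() when compiled with CUDA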
#endif
|
9620.c
|
/* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <[email protected]>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
  /* The original autotuning placeholders (#P9, #P11, #P12 and the incomplete
     "target teams distribute" line) are replaced here with one plain, valid
     OpenMP directive so the kernel compiles; the static schedule is a
     representative choice, not a tuned value. */
  #pragma omp parallel for private(i, j) schedule(static)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
stat_ops.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "stat_ops.h"
#include "utility.h"
#include "constant.h"
double expectation_value_X_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_Y_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_Z_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state, ITYPE dim);
CTYPE transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim);
CTYPE transition_amplitude_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim);
// calculate norm
double state_norm_squared(const CTYPE *state, ITYPE dim) {
ITYPE index;
double norm = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:norm)
#endif
for (index = 0; index < dim; ++index){
norm += pow(cabs(state[index]), 2);
}
return norm;
}
// calculate entropy of probability distribution of Z-basis measurements
double measurement_distribution_entropy(const CTYPE *state, ITYPE dim){
ITYPE index;
double ent=0;
const double eps = 1e-15;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:ent)
#endif
for(index = 0; index < dim; ++index){
double prob = pow(cabs(state[index]),2);
prob = (prob > eps)?prob:eps;
ent += -1.0*prob*log(prob);
}
return ent;
}
// calculate inner product of two state vectors
CTYPE state_inner_product(const CTYPE *state_bra, const CTYPE *state_ket, ITYPE dim) {
#ifndef _MSC_VER
CTYPE value = 0;
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:value)
#endif
for(index = 0; index < dim; ++index){
value += conj(state_bra[index]) * state_ket[index];
}
return value;
#else
double real_sum = 0.;
double imag_sum = 0.;
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:real_sum,imag_sum)
#endif
for (index = 0; index < dim; ++index) {
        CTYPE value = conj(state_bra[index]) * state_ket[index];
real_sum += creal(value);
imag_sum += cimag(value);
}
return real_sum + 1.i * imag_sum;
#endif
}
// calculate probability with which we obtain 0 at target qubit
double M0_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index);
sum += pow(cabs(state[basis_0]),2);
}
return sum;
}
// calculate probability with which we obtain 1 at target qubit
double M1_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_1 = insert_zero_to_basis_index(state_index,mask,target_qubit_index) ^ mask;
sum += pow(cabs(state[basis_1]),2);
}
return sum;
}
// calculate marginal probability with which we obtain the set of values measured_value_list at the qubits in sorted_target_qubit_index_list
// warning: sorted_target_qubit_index_list must be sorted.
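// Example (derived from the loop below): with dim = 8 (three qubits),
// sorted_target_qubit_index_list = {0, 2} and measured_value_list = {1, 0},
// the sum runs over exactly the basis states with qubit 0 = 1 and qubit 2 = 0,
// i.e. binary 001 and 011 (indices 1 and 3).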
double marginal_prob(const UINT* sorted_target_qubit_index_list, const UINT* measured_value_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim){
ITYPE loop_dim = dim >> target_qubit_index_count;
ITYPE state_index;
double sum=0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index = 0;state_index < loop_dim; ++state_index){
ITYPE basis = state_index;
for(UINT cursor=0; cursor < target_qubit_index_count ; cursor++){
UINT insert_index = sorted_target_qubit_index_list[cursor];
ITYPE mask = 1ULL << insert_index;
basis = insert_zero_to_basis_index(basis, mask , insert_index );
basis ^= mask * measured_value_list[cursor];
}
sum += pow(cabs(state[basis]),2);
}
return sum;
}
// calculate expectation value of X on target qubit
double expectation_value_X_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index);
ITYPE basis_1 = basis_0 ^ mask;
sum += creal( conj(state[basis_0]) * state[basis_1] ) * 2;
}
return sum;
}
// calculate expectation value of Y on target qubit
double expectation_value_Y_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index);
ITYPE basis_1 = basis_0 ^ mask;
sum += cimag( conj(state[basis_0]) * state[basis_1] ) * 2;
}
return sum;
}
// calculate expectation value of Z on target qubit
double expectation_value_Z_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
int sign = 1 - 2 * ((state_index >> target_qubit_index)%2);
sum += creal( conj(state[state_index]) * state[state_index] ) * sign;
}
return sum;
}
// calculate expectation value for single-qubit pauli operator
double expectation_value_single_qubit_Pauli_operator(UINT target_qubit_index, UINT Pauli_operator_type, const CTYPE *state, ITYPE dim) {
if(Pauli_operator_type == 0){
return state_norm_squared(state,dim);
}else if(Pauli_operator_type == 1){
return expectation_value_X_Pauli_operator(target_qubit_index, state, dim);
}else if(Pauli_operator_type == 2){
return expectation_value_Y_Pauli_operator(target_qubit_index, state, dim);
}else if(Pauli_operator_type == 3){
return expectation_value_Z_Pauli_operator(target_qubit_index, state, dim);
}else{
fprintf(stderr,"invalid expectation value of pauli operator is called");
exit(1);
}
}
// calculate expectation value of multi-qubit Pauli operator on qubits.
// bit-flip mask : the n-bit binary string of which the i-th element is 1 iff the i-th pauli operator is X or Y
// phase-flip mask : the n-bit binary string of which the i-th element is 1 iff the i-th pauli operator is Y or Z
// We assume bit-flip mask is nonzero, namely, there is at least one X or Y operator.
// the pivot qubit is any qubit index which has X or Y
// To generate bit-flip mask and phase-flip mask, see get_masks_*_list at utility.h
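// Example: for the Pauli string X_0 Y_1 Z_2 (X on qubit 0, Y on qubit 1, Z on qubit 2),
// bit_flip_mask = 0b011 and phase_flip_mask = 0b110; global_phase_90rot_count = 1
// since the single Y contributes one factor of i, and the pivot qubit may be
// chosen as either qubit 0 or qubit 1.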
double expectation_value_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
UINT sign_0 = count_population(basis_0 & phase_flip_mask)%2;
sum += creal(state[basis_0] * conj(state[basis_1]) * PHASE_90ROT[ (global_phase_90rot_count + sign_0*2)%4 ] * 2.0);
}
return sum;
}
double expectation_value_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
int bit_parity = count_population(state_index & phase_flip_mask)%2;
int sign = 1 - 2*bit_parity;
sum += pow(cabs(state[state_index]),2) * sign;
}
return sum;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
ITYPE state_index;
#ifndef _MSC_VER
CTYPE sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
UINT sign_0 = count_population(basis_0 & phase_flip_mask) % 2;
sum += state_ket[basis_0] * conj(state_bra[basis_1]) * PHASE_90ROT[(global_phase_90rot_count + sign_0 * 2) % 4];
UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2;
sum += state_ket[basis_1] * conj(state_bra[basis_0]) * PHASE_90ROT[(global_phase_90rot_count + sign_1 * 2) % 4];
}
#else
double sum_real = 0.;
double sum_imag = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum_real, sum_imag)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
UINT sign_0 = count_population(basis_0 & phase_flip_mask) % 2;
UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2;
CTYPE val1 = state_ket[basis_0] * conj(state_bra[basis_1]) * PHASE_90ROT[(global_phase_90rot_count + sign_0 * 2) % 4];
CTYPE val2 = state_ket[basis_1] * conj(state_bra[basis_0]) * PHASE_90ROT[(global_phase_90rot_count + sign_1 * 2) % 4];
sum_real += creal(val1);
sum_imag += cimag(val1);
sum_real += creal(val2);
sum_imag += cimag(val2);
}
    CTYPE sum = _Cbuild(sum_real, sum_imag); /* assumes CTYPE is _Dcomplex under MSVC */
#endif
return sum;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
const ITYPE loop_dim = dim;
ITYPE state_index;
#ifndef _MSC_VER
CTYPE sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
int bit_parity = count_population(state_index & phase_flip_mask) % 2;
double sign = 1 - 2 * bit_parity;
sum += sign*state_ket[state_index] * conj(state_bra[state_index]);
}
return sum;
#else
double sum_real = 0.;
double sum_imag = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum_real, sum_imag)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
int bit_parity = count_population(state_index & phase_flip_mask) % 2;
double sign = 1 - 2 * bit_parity;
CTYPE val = sign * state_ket[state_index] * conj(state_bra[state_index]);
sum_real += creal(val);
sum_imag += cimag(val);
}
    CTYPE sum = _Cbuild(sum_real, sum_imag); /* assumes CTYPE is _Dcomplex under MSVC */
#endif
return sum;
}
double expectation_value_multi_qubit_Pauli_operator_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim){
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
double result;
if(bit_flip_mask == 0){
result = expectation_value_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state,dim);
}else{
result = expectation_value_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
return result;
}
double expectation_value_multi_qubit_Pauli_operator_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, const CTYPE* state, ITYPE dim){
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
double result;
if(bit_flip_mask == 0){
result = expectation_value_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state, dim);
}else{
result = expectation_value_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
return result;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
CTYPE result;
if (bit_flip_mask == 0) {
result = transition_amplitude_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state_bra, state_ket, dim);
}
else {
result = transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state_bra, state_ket, dim);
}
return result;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
CTYPE result;
if (bit_flip_mask == 0) {
result = transition_amplitude_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state_bra, state_ket, dim);
}
else {
result = transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state_bra, state_ket, dim);
}
return result;
}
|
vector.c
|
/*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.8 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "headers.h"
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_SeqVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCreate( int size )
{
hypre_Vector *vector;
vector = hypre_CTAlloc(hypre_Vector, 1);
hypre_VectorData(vector) = NULL;
hypre_VectorSize(vector) = size;
hypre_VectorNumVectors(vector) = 1;
hypre_VectorMultiVecStorageMethod(vector) = 0;
/* set defaults */
hypre_VectorOwnsData(vector) = 1;
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultiVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqMultiVectorCreate( int size, int num_vectors )
{
hypre_Vector *vector = hypre_SeqVectorCreate(size);
hypre_VectorNumVectors(vector) = num_vectors;
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorDestroy
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
int ierr=0;
if (vector)
{
if ( hypre_VectorOwnsData(vector) )
{
hypre_TFree(hypre_VectorData(vector));
}
hypre_TFree(vector);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInitialize
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
int size = hypre_VectorSize(vector);
int ierr = 0;
int num_vectors = hypre_VectorNumVectors(vector);
int multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);
if ( ! hypre_VectorData(vector) )
hypre_VectorData(vector) = hypre_CTAlloc(double, num_vectors*size);
if ( multivec_storage_method == 0 )
{
hypre_VectorVectorStride(vector) = size;
hypre_VectorIndexStride(vector) = 1;
}
else if ( multivec_storage_method == 1 )
{
hypre_VectorVectorStride(vector) = 1;
hypre_VectorIndexStride(vector) = num_vectors;
}
else
++ierr;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetDataOwner
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
int owns_data )
{
int ierr=0;
hypre_VectorOwnsData(vector) = owns_data;
return ierr;
}
/*--------------------------------------------------------------------------
* ReadVector
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
hypre_Vector *vector;
FILE *fp;
double *data;
int size;
int j;
/*----------------------------------------------------------
* Read in the data
*----------------------------------------------------------*/
fp = fopen(file_name, "r");
fscanf(fp, "%d", &size);
vector = hypre_SeqVectorCreate(size);
hypre_SeqVectorInitialize(vector);
data = hypre_VectorData(vector);
for (j = 0; j < size; j++)
{
fscanf(fp, "%le", &data[j]);
}
fclose(fp);
/* multivector code not written yet >>> */
hypre_assert( hypre_VectorNumVectors(vector) == 1 );
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorPrint
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorPrint( hypre_Vector *vector,
char *file_name )
{
FILE *fp;
double *data;
int size, num_vectors, vecstride, idxstride;
int i, j;
int ierr = 0;
num_vectors = hypre_VectorNumVectors(vector);
vecstride = hypre_VectorVectorStride(vector);
idxstride = hypre_VectorIndexStride(vector);
/*----------------------------------------------------------
* Print in the data
*----------------------------------------------------------*/
data = hypre_VectorData(vector);
size = hypre_VectorSize(vector);
fp = fopen(file_name, "w");
if ( hypre_VectorNumVectors(vector) == 1 )
{
fprintf(fp, "%d\n", size);
}
else
{
fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
}
if ( num_vectors>1 )
{
for ( j=0; j<num_vectors; ++j )
{
fprintf(fp, "vector %d\n", j );
for (i = 0; i < size; i++)
{
fprintf(fp, "%.14e\n", data[ j*vecstride + i*idxstride ] );
}
}
}
else
{
for (i = 0; i < size; i++)
{
fprintf(fp, "%.14e\n", data[i]);
}
}
fclose(fp);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetConstantValues
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
double value )
{
double *vector_data = hypre_VectorData(v);
int size = hypre_VectorSize(v);
int i;
int ierr = 0;
size *=hypre_VectorNumVectors(v);
for (i = 0; i < size; i++)
vector_data[i] = value;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCopy
* copies data from x to y
* y should have already been initialized at the same size as x
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorCopy( hypre_Vector *x,
hypre_Vector *y )
{
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int size = hypre_VectorSize(x);
int i;
int ierr = 0;
size *=hypre_VectorNumVectors(x);
for (i = 0; i < size; i++)
y_data[i] = x_data[i];
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
int size = hypre_VectorSize(x);
int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_SeqVectorInitialize(y);
hypre_SeqVectorCopy( x, y );
return y;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneShallow
* Returns a complete copy of x - a shallow copy, pointing the data of x
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
int size = hypre_VectorSize(x);
int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_VectorData(y) = hypre_VectorData(x);
hypre_SeqVectorSetDataOwner( y, 0 );
hypre_SeqVectorInitialize(y);
return y;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorScale
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorScale( double alpha,
hypre_Vector *y )
{
double *y_data = hypre_VectorData(y);
int size = hypre_VectorSize(y);
int i;
int ierr = 0;
size *=hypre_VectorNumVectors(y);
for (i = 0; i < size; i++)
y_data[i] *= alpha;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorAxpy( double alpha,
hypre_Vector *x,
hypre_Vector *y )
{
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int size = hypre_VectorSize(x);
int i;
int ierr = 0;
size *=hypre_VectorNumVectors(x);
#pragma omp parallel for schedule(dynamic, (size/16 > 0 ? size/16 : 1)) /* guard: chunk size must be positive */
//#pragma omp parallel for schedule(dynamic)
for (i = 0; i < size; i++)
y_data[i] += alpha * x_data[i];
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
double hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int size = hypre_VectorSize(x);
int i;
double result = 0.0;
size *=hypre_VectorNumVectors(x);
for (i = 0; i < size; i++)
result += y_data[i] * x_data[i];
return result;
}
/*--------------------------------------------------------------------------
* hypre_VectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
double hypre_VectorSumElts( hypre_Vector *vector )
{
double sum = 0;
double * data = hypre_VectorData( vector );
int size = hypre_VectorSize( vector );
int i;
for ( i=0; i<size; ++i ) sum += data[i];
return sum;
}
|
stencil_threads.c
|
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "malloc2D.h"
#include "timer.h"
#define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp)
int main(int argc, char *argv[])
{
#pragma omp parallel
if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads());
struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total;
   double init_time = 0.0, flush_time = 0.0, stencil_time = 0.0, total_time = 0.0;
int imax=2002, jmax = 2002;
double** xtmp;
double** x = malloc2D(jmax, imax);
double** xnew = malloc2D(jmax, imax);
int *flush = (int *)malloc(jmax*imax*sizeof(int)*4);
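   // The oversized flush array below appears to serve only to sweep the cache
   // between timing iterations so each stencil pass starts from cold data.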
cpu_timer_start(&tstart_total);
#pragma omp parallel
{
int thread_id = omp_get_thread_num();
if (thread_id == 0) cpu_timer_start(&tstart_init);
#pragma omp for
for (int j = 0; j < jmax; j++){
for (int i = 0; i < imax; i++){
xnew[j][i] = 0.0;
x[j][i] = 5.0;
}
}
#pragma omp for
for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
for (int i = imax/2 - 5; i < imax/2 -1; i++){
x[j][i] = 400.0;
}
}
if (thread_id == 0) init_time += cpu_timer_stop(tstart_init);
for (int iter = 0; iter < 10000; iter++){
if (thread_id ==0) cpu_timer_start(&tstart_flush);
#pragma omp for nowait
for (int l = 1; l < jmax*imax*4; l++){
flush[l] = 1.0;
}
if (thread_id == 0){
flush_time += cpu_timer_stop(tstart_flush);
cpu_timer_start(&tstart_stencil);
}
#pragma omp for
for (int j = 1; j < jmax-1; j++){
for (int i = 1; i < imax-1; i++){
xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
}
}
#pragma omp barrier
if (thread_id == 0){
stencil_time += cpu_timer_stop(tstart_stencil);
SWAP_PTR(xnew, x, xtmp);
if (iter%1000 == 0) printf("Iter %d\n",iter);
}
#pragma omp barrier
}
} // end omp parallel
total_time += cpu_timer_stop(tstart_total);
printf("Timing is init %f flush %f stencil %f total %f\n",
init_time,flush_time,stencil_time,total_time);
}
|
ep.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <math.h>
#include <omp.h>
#include "util.h"
#define TRUE 1
#define FALSE 0
#define DEBUG 1
#define NMAX 1000
#define MAX_LINE 256 /*Range [0, 255]*/
#define RGB_SIZE 256
#define PI 3.14159265359
long int x;
int main(int argc, char **argv) {
FILE *arq1, *arq2;
char *infile, *outfile;
char a[MAX_LINE];
int nr_inter, nr_proc/*, nr_threads*/;
int i, j, k, cont, columns, lines, comp_max_val;
double val, distribute;
double gx, gy, g, angle;
   Pixel **M; /*Pixel matrix*/
i = j = k = 0;
   /*Usage message*/
   if (argc < 5) {
      printf("Usage:\n\tArg1: input file name;\n\tArg2: output file name\n\t");
      printf("Arg3: number of iterations;\n\tArg4: number of processors.\n\t");
exit(1);
}
infile = argv[1];
outfile = argv[2];
nr_inter = atoi(argv[3]);
nr_proc = atoi(argv[4]);
if (nr_proc <= 0) nr_proc = 1;
arq1 = fopen(infile, "r");
if (arq1 == NULL)
printf("Erro, não foi possível abrir o arquivo\n");
else {
/*Read the input file*/
if (DEBUG) printf("Arquivo aberto!\n");
      cont = 0;
      int ch; /* read into an int so that EOF is detected correctly */
      while ((ch = fgetc(arq1)) != EOF) {
         a[0] = (char) ch;
         if (a[0] == '#' || a[0] == 'P') {
fgets(a, MAX_LINE, arq1);
}
else if (cont == 0) {
ungetc(a[0], arq1);
fscanf(arq1,"%d %d\n", &columns, &lines);
fscanf(arq1,"%d\n", &comp_max_val);
cont++;
            /*Allocate the matrices*/
M = (Pixel **) malloc(lines * sizeof(Pixel*));
for (i = 0; i < lines; i++) {
M[i] = (Pixel *) malloc(columns * sizeof(Pixel));
}
}
else {
ungetc(a[0], arq1);
for (i = 0; i < lines; i++) {
for (j = 0; j < columns; j++) {
fscanf(arq1, "%lf %lf %lf", &M[i][j].R, &M[i][j].G, &M[i][j].B);
M[i][j].R /= RGB_SIZE;
M[i][j].G /= RGB_SIZE;
/*M2[i][j].G = (2*PI * M2[i][j].G) / RGB_SIZE; */
M[i][j].B /= RGB_SIZE;
M[i][j].ang = 2 * PI * M[i][j].G;
                  /* Compute Rx, Ry, Bx and By while reading the input */
M[i][j].Rx = horizontal_component(M[i][j].R, M[i][j].G);
M[i][j].Bx = (-1) * horizontal_component(M[i][j].B, M[i][j].G);
M[i][j].Ry = vertical_component(M[i][j].R, M[i][j].G);
M[i][j].By = (-1) * vertical_component(M[i][j].B, M[i][j].G);
}
}
break;
}
}
}
fclose(arq1);
if (DEBUG) printf("Arquivo lido!\n");
   /*IMPORTANT: the borders never change.*/
for (k = 0; k < nr_inter; k++) {
if (lines - 2 < nr_proc) nr_proc = 1;
#pragma omp parallel firstprivate(lines, columns) private(i, j, val) num_threads(nr_proc)
{
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
         int rest = (lines - 2) % num_threads; /*lines-2 because the borders are excluded*/
int start, end;
         /*Split the rows into one chunk per thread. The + 1 skips row zero, which is a border*/
         /*Since the loop is strictly less than end, the final border needs no special handling*/
start = thread_num * (lines - 2) / num_threads + 1;
if (thread_num != 0 && (thread_num - 1) < rest) start++;
end = (thread_num + 1) * (lines - 2) / num_threads + 1;
if (thread_num < rest) end++;
         for (i = start; i < end; i++) { /*Because of the border*/
for (j = 1; j < columns - 1; j++) {
if (M[i][j].Rx > 0) {
if (j != columns -1) {
val = transfer(M[i][j+1].R, M[i][j].Rx);
if (i != start && i != end) {
M[i][j+1].Rx += val;
M[i][j].Rx -= val;
}
else {
#pragma omp critical
{
M[i][j+1].Rx += val;
M[i][j].Rx -= val;
}
}
}
if (j != 1) {
val = transfer(M[i][j-1].B, M[i][j].Bx);
if (i != start && i != end) {
                     /*Received in the opposite direction*/
M[i][j-1].Bx += val;
M[i][j].Bx -= val;
}
else {
#pragma omp critical
{
M[i][j-1].Bx += val;
M[i][j].Bx -= val;
}
}
}
}
            else { /*Receives a positive value*/
if (j != 1) {
val = transfer(M[i][j-1].R, M[i][j].Rx);
if (i != start && i != end) {
M[i][j-1].Rx -= val;
M[i][j].Rx += val;
}
else {
#pragma omp critical
{
M[i][j-1].Rx -= val;
M[i][j].Rx += val;
}
}
}
if (j != columns - 1) {
val = transfer(M[i][j+1].B, M[i][j].Bx);
if (i != start && i != end) {
                     M[i][j+1].Bx -= val; /*Received in the opposite direction*/
M[i][j].Bx += val;
}
else {
#pragma omp critical
{
M[i][j+1].Bx -= val;
M[i][j].Bx += val;
}
}
}
}
if (M[i][j].Ry > 0) {
if (i != 1) {
val = transfer(M[i-1][j].R, M[i][j].Ry);
if (i != start && i != end) {
M[i-1][j].Ry += val;
M[i][j].Ry -= val;
}
else {
#pragma omp critical
{
M[i-1][j].Ry += val;
M[i][j].Ry -= val;
}
}
}
if (i != lines - 1) {
val = transfer(M[i+1][j].B, M[i][j].By);
if (i != start && i != end) {
M[i+1][j].By += val;
M[i][j].By -= val;
}
else {
#pragma omp critical
{
M[i+1][j].By += val;
M[i][j].By -= val;
}
}
}
}
            else { /*Receives a positive value*/
if (i != lines - 1) {
val = transfer(M[i+1][j].R, M[i][j].Ry);
if (i != start && i != end) {
M[i+1][j].Ry -= val;
M[i][j].Ry += val;
}
else {
#pragma omp critical
{
M[i+1][j].Ry -= val;
M[i][j].Ry += val;
}
}
}
if (i != 1) {
val = transfer(M[i-1][j].B, M[i][j].By);
if (i != start && i != end) {
M[i-1][j].By -= val;
M[i][j].By += val;
}
else {
#pragma omp critical
{
M[i-1][j].By -= val;
M[i][j].By += val;
}
}
}
}
}
}
}
      /*The block below checks whether neighbouring pixels have overflowed*/
for (i = 1; i < lines - 1; i++) {
for (j = 1; j < columns - 1; j++) {
            /*Check R*/
if (M[i][j].R > 1) {
distribute = (M[i][j].R - 1) / 4;
M[i][j].R = 1;
               /*The ifs check that the neighbours are not on the border and will not overflow*/
if (i-1 > 0 && M[i-1][j].R + distribute < 1) M[i-1][j].R += distribute;
if (i+1 < lines && M[i+1][j].R + distribute < 1) M[i+1][j].R += distribute;
if (j-1 > 0 && M[i][j-1].R + distribute < 1) M[i][j-1].R += distribute;
if (j+1 < columns && M[i][j+1].R + distribute < 1) M[i][j+1].R += distribute;
}
            /*Check B*/
if (M[i][j].B > 1) {
distribute = (M[i][j].B - 1) / 4;
M[i][j].B = 1;
               /*The ifs check that the neighbours are not on the border and will not overflow*/
if (i-1 > 0 && M[i-1][j].B + distribute < 1) M[i-1][j].B += distribute;
if (i+1 < lines && M[i+1][j].B + distribute < 1) M[i+1][j].B += distribute;
if (j-1 > 0 && M[i][j-1].B + distribute < 1) M[i][j-1].B += distribute;
if (j+1 < columns && M[i][j+1].B + distribute < 1) M[i][j+1].B += distribute;
}
}
}
      /*Loop to update G*/
for (i = 1; i < lines - 1; i++) {
      #pragma omp parallel for num_threads(nr_proc) schedule(dynamic) private(gx, gy, g, angle)
for (j = 1; j < columns - 1; j++) {
gx = M[i][j].Rx + M[i][j].Bx;
gy = M[i][j].Ry + M[i][j].By;
g = sqrt((gx*gx) + (gy*gy));
angle = 2 * PI * g;
M[i][j].ang += angle;
M[i][j].G += g;
if (M[i][j].ang > 2 * PI)
M[i][j].ang -= 2*PI;
}
}
}
   /*Write the output file*/
arq2 = fopen(outfile, "w");
if (arq2 == NULL)
printf("Erro, não foi possível abrir o arquivo\n");
else {
fprintf(arq2, "P3\n%d %d\n255\n", columns, lines);
for (i = 0; i < lines; i++)
for (j = 0; j < columns; j++)
fprintf(arq2, "%d %d %d \n",
(int)(RGB_SIZE* M[i][j].R), (int)(RGB_SIZE* M[i][j].ang), (int)(RGB_SIZE* M[i][j].B));
fprintf(stdout, "A imagem foi salva no arquivo: %s\n", outfile);
fclose(arq2);
}
for (i = 0; i < lines; i++) {
free(M[i]);
}
free(M);
return 0;
}
|
variable_bound_move_generator.h
|
/*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_VARIABLE_BOUND_MOVE_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_VARIABLE_BOUND_MOVE_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
template <class T_Variable, class T_Expression>
class VariableBoundMoveGenerator
: public AbstractMoveGenerator<T_Variable, T_Expression> {
private:
public:
/*************************************************************************/
VariableBoundMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
virtual ~VariableBoundMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
void setup(const std::vector<model_component::Constraint<
T_Variable, T_Expression> *> &a_RAW_CONSTRAINT_PTRS) {
/**
* Exclude constraints which contain fixed variables or selection
* variables.
*/
auto constraint_ptrs =
extract_effective_constraint_ptrs(a_RAW_CONSTRAINT_PTRS);
/**
* Convert constraint objects to BinomialConstraint objects.
*/
auto binomials = convert_to_binomial_constraints(constraint_ptrs);
/**
* Setup move objects.
*/
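        /**
         * Layout note (inferred from the move updater below): each binomial
         * constraint a1*x1 + a2*x2 + c (<=, =, >=) 0 produces four moves,
         * stored at indices 4*i .. 4*i+3:
         * 4*i  : x1 -> x1 + 1, x2 -> nearest feasible bound value
         * 4*i+1: x1 -> x1 - 1, x2 -> nearest feasible bound value
         * 4*i+2: x2 -> x2 + 1, x1 -> nearest feasible bound value
         * 4*i+3: x2 -> x2 - 1, x1 -> nearest feasible bound value
         */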
const int BINOMIALS_SIZE = binomials.size();
this->m_moves.resize(4 * BINOMIALS_SIZE);
this->m_flags.resize(4 * BINOMIALS_SIZE);
for (auto i = 0; i < BINOMIALS_SIZE; i++) {
this->m_moves[4 * i].sense = MoveSense::VariableBound;
this->m_moves[4 * i].alterations.emplace_back(
binomials[i].variable_ptr_first, 0);
this->m_moves[4 * i].alterations.emplace_back(
binomials[i].variable_ptr_second, 0);
this->m_moves[4 * i].is_univariable_move = false;
utility::update_union_set(
&(this->m_moves[4 * i].related_constraint_ptrs),
binomials[i].variable_ptr_first->related_constraint_ptrs());
utility::update_union_set(
&(this->m_moves[4 * i].related_constraint_ptrs),
binomials[i].variable_ptr_second->related_constraint_ptrs());
this->m_moves[4 * i].is_special_neighborhood_move = true;
this->m_moves[4 * i].is_available = true;
this->m_moves[4 * i].overlap_rate = 0.0;
this->m_moves[4 * i + 1] = this->m_moves[4 * i];
this->m_moves[4 * i + 2] = this->m_moves[4 * i];
this->m_moves[4 * i + 3] = this->m_moves[4 * i];
}
/**
* Setup move updater.
*/
auto move_updater = //
[this, binomials, BINOMIALS_SIZE](
auto * a_moves, //
auto * a_flags, //
const bool a_ACCEPT_ALL, //
const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, //
const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, //
[[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < BINOMIALS_SIZE; i++) {
{
auto index = 4 * i;
auto &alterations = (*a_moves)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_first *
(binomials[i].variable_ptr_first->value() +
1)) /
binomials[i].sensitivity_second;
if ((binomials[i].sensitivity_second > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_second < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second =
binomials[i].variable_ptr_first->value() + 1;
alterations[1].second = target;
}
{
auto index = 4 * i + 1;
auto &alterations = (*a_moves)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_first *
(binomials[i].variable_ptr_first->value() -
1)) /
binomials[i].sensitivity_second;
if ((binomials[i].sensitivity_second > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_second < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second =
binomials[i].variable_ptr_first->value() - 1;
alterations[1].second = target;
}
{
auto index = 4 * i + 2;
auto &alterations = (*a_moves)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_second *
(binomials[i].variable_ptr_second->value() +
1)) /
binomials[i].sensitivity_first;
if ((binomials[i].sensitivity_first > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_first < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second = target;
alterations[1].second =
binomials[i].variable_ptr_second->value() + 1;
}
{
auto index = 4 * i + 3;
auto &alterations = (*a_moves)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_second *
(binomials[i].variable_ptr_second->value() -
1)) /
binomials[i].sensitivity_first;
if ((binomials[i].sensitivity_first > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_first < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second = target;
alterations[1].second =
binomials[i].variable_ptr_second->value() - 1;
}
}
const int MOVES_SIZE = a_moves->size();
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < MOVES_SIZE; i++) {
(*a_flags)[i] = 1;
if (!(*a_moves)[i].is_available) {
(*a_flags)[i] = 0;
continue;
}
if (neighborhood::has_fixed_variable((*a_moves)[i])) {
(*a_flags)[i] = 0;
continue;
}
if (neighborhood::has_bound_violation((*a_moves)[i])) {
(*a_flags)[i] = 0;
continue;
}
if (a_ACCEPT_ALL) {
/** nothing to do */
} else {
if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
neighborhood::has_objective_improvable_variable(
(*a_moves)[i])) {
continue;
}
if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
neighborhood::has_feasibility_improvable_variable(
(*a_moves)[i])) {
continue;
}
(*a_flags)[i] = 0;
}
}
};
this->m_move_updater = move_updater;
}
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
|
omp_alloc_null_fb.c
|
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
int main() {
omp_alloctrait_t at[2];
omp_allocator_handle_t a;
void *p[2];
at[0].key = omp_atk_pool_size;
at[0].value = 2 * 1024 * 1024;
at[1].key = omp_atk_fallback;
at[1].value = omp_atv_null_fb;
a = omp_init_allocator(omp_default_mem_space, 2, at);
printf("allocator created: %p\n", (void *)a);
#pragma omp parallel num_threads(2)
{
int i = omp_get_thread_num();
#pragma omp barrier
p[i] = omp_alloc(1024 * 1024, a);
#pragma omp barrier
printf("th %d, ptr %p\n", i, p[i]);
omp_free(p[i], a);
}
  // The allocator pool carries a small bookkeeping overhead, so the two 1 MB
  // allocations cannot both fit in the 2 MB pool; with the null fallback
  // requested, exactly one of the two pointers should be NULL.
if ((p[0] == NULL && p[1] != NULL) || (p[0] != NULL && p[1] == NULL)) {
printf("passed\n");
return 0;
} else {
printf("failed: pointers %p %p\n", p[0], p[1]);
return 1;
}
}
|
vednnLinearForward.c
|
#include "vednnLinearForward.h"
#include "vednn-def.h"
static inline vednnError_t
vednnLinearForward_wrapper(
vednnLinearForward_t pFunc,
VEDNN_LINEARFWD_ARGS )
{
#ifndef VEDNN_USE_OPENMP
return pFunc(VEDNN_LINEARFWD_ARGS_LIST);
#else
if ( __vednn_omp_num_threads == 1 ) {
return pFunc(VEDNN_LINEARFWD_ARGS_LIST);
}
else {
vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
{
uint64_t nthreads = omp_get_num_threads() ;
uint64_t threadid = omp_get_thread_num() ;
uint64_t nBatchEach = nBatch / nthreads ;
uint64_t remain = nBatch % nthreads ;
uint64_t batchBegin = nBatchEach * threadid + ( threadid < remain ? threadid : remain ) ;
uint64_t myBatch = nBatchEach + ( threadid < remain ? 1 : 0 ) ;
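      // Example: nBatch = 10 and nthreads = 4 give per-thread batches of
      // 3, 3, 2, 2 starting at offsets 0, 3, 6 and 8 respectively.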
if( myBatch == 0 ) {
rc |= VEDNN_SUCCESS ;
}
else {
float* _pDataIn = ((float *)pDataIn) + batchBegin * inDim ;
float* _pDataOut = ((float *)pDataOut) + batchBegin * outDim ;
rc |= pFunc(inDim, outDim, myBatch, _pDataIn, pDataWeight, _pDataOut ) ;
}
}
return rc ;
}
#endif
}
/* ----------------------------------------------------------------------- */
vednnError_t vednnLinearForward( VEDNN_LINEARFWD_ARGS )
{
#define OMPWRAP( IMPL ) WRAP_RET(vednnLinearForward_##IMPL, \
vednnLinearForward_wrapper, VEDNN_LINEARFWD_ARGS_LIST)
if( outDim <= 32 )
OMPWRAP(oU32);
else
{
if( (outDim & 0x01) == 0 &&
(((uint64_t)pDataWeight) & 0x07) == 0 && (((uint64_t)pDataOut) & 0x07) == 0 )
OMPWRAP(o2X_woaligned);
else
OMPWRAP(default);
}
#undef OMPWRAP
}
// vim: et sw=2 ts=2
|
DenseVector.h
|
//=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseVector.h
// \brief Header file for the OpenMP-based dense vector SMP implementation
//
// Copyright (C) 2013 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/DenseSubvector.h>
#include <blaze/math/expressions/DenseVector.h>
#include <blaze/math/expressions/SparseVector.h>
#include <blaze/math/Functions.h>
#include <blaze/math/intrinsics/IntrinsicTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/SparseSubvector.h>
#include <blaze/math/traits/SubvectorExprTrait.h>
#include <blaze/math/typetraits/IsDenseVector.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/system/SMP.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/logging/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
#include <blaze/util/typetraits/IsSame.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side dense vector
, bool TF2 > // Transpose flag of the right-hand side dense vector
void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef IntrinsicTrait<typename VT1::ElementType> IT;
typedef typename SubvectorExprTrait<VT1,aligned>::Type AlignedTarget;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const bool vectorizable( VT1::vectorizable && VT2::vectorizable && IsSame<ET1,ET2>::value );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t equalShare ( (~lhs).size() / threads + addon );
const size_t rest ( equalShare & ( IT::size - 1UL ) );
const size_t sizePerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) );
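   // Note (inferred from the expressions above): for vectorizable operands each
   // thread's share is rounded up to a multiple of the intrinsic vector width
   // IT::size, so every per-thread subvector except possibly the last starts at
   // a SIMD-friendly offset; e.g. size 1000, 4 threads, IT::size 4 gives
   // sizePerThread = 252.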
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
if( vectorizable && lhsAligned && rhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
assign( target, subvector<aligned>( ~rhs, index, size ) );
}
else if( vectorizable && lhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
assign( target, subvector<unaligned>( ~rhs, index, size ) );
}
else if( vectorizable && rhsAligned ) {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
assign( target, subvector<aligned>( ~rhs, index, size ) );
}
else {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
assign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side sparse vector
, bool TF2 > // Transpose flag of the right-hand side sparse vector
void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t sizePerThread( (~lhs).size() / threads + addon );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
assign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case at least one of the two given vector operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, Or< Not< IsSMPAssignable<VT1> >
, Not< IsSMPAssignable<VT2> > > > >::Type
smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function performs the OpenMP-based SMP assignment to a dense vector. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, IsSMPAssignable<VT1>
, IsSMPAssignable<VT2> > >::Type
smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT1::ElementType );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT2::ElementType );
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
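//*************************************************************************************************
// Usage sketch (illustrative only; the vector type and sizes are assumed): smpAssign() is never
// called directly. An SMP-assignable dense vector reaches it through its assignment operator,
// and the SFINAE-restricted overloads above then select either the serial fallback or the
// OpenMP-parallel path:
//
//    blaze::DynamicVector<double> a( 10000UL );
//    const blaze::DynamicVector<double> b( 10000UL, 2.0 );
//    a = b;  // forwards to smpAssign( ~a, ~b ); if b.canSMPAssign() and no serial section is
//            // active, an OpenMP parallel region is opened and each thread assigns its chunk
//            // via smpAssign_backend()
//*************************************************************************************************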
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side dense vector
, bool TF2 > // Transpose flag of the right-hand side dense vector
void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef IntrinsicTrait<typename VT1::ElementType> IT;
typedef typename SubvectorExprTrait<VT1,aligned>::Type AlignedTarget;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const bool vectorizable( VT1::vectorizable && VT2::vectorizable && IsSame<ET1,ET2>::value );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t equalShare ( (~lhs).size() / threads + addon );
const size_t rest ( equalShare & ( IT::size - 1UL ) );
const size_t sizePerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
if( vectorizable && lhsAligned && rhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
addAssign( target, subvector<aligned>( ~rhs, index, size ) );
}
else if( vectorizable && lhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
else if( vectorizable && rhsAligned ) {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
addAssign( target, subvector<aligned>( ~rhs, index, size ) );
}
else {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side sparse vector
, bool TF2 > // Transpose flag of the right-hand side sparse vector
void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t sizePerThread( (~lhs).size() / threads + addon );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case at least one of the two given vector operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, Or< Not< IsSMPAssignable<VT1> >
, Not< IsSMPAssignable<VT2> > > > >::Type
smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, IsSMPAssignable<VT1>
, IsSMPAssignable<VT2> > >::Type
smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT1::ElementType );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT2::ElementType );
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpAddAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment of a
// dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side dense vector
, bool TF2 > // Transpose flag of the right-hand side dense vector
void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef IntrinsicTrait<typename VT1::ElementType> IT;
typedef typename SubvectorExprTrait<VT1,aligned>::Type AlignedTarget;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const bool vectorizable( VT1::vectorizable && VT2::vectorizable && IsSame<ET1,ET2>::value );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t equalShare ( (~lhs).size() / threads + addon );
const size_t rest ( equalShare & ( IT::size - 1UL ) );
const size_t sizePerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
if( vectorizable && lhsAligned && rhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
subAssign( target, subvector<aligned>( ~rhs, index, size ) );
}
else if( vectorizable && lhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
else if( vectorizable && rhsAligned ) {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
subAssign( target, subvector<aligned>( ~rhs, index, size ) );
}
else {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment of
// a sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side sparse vector
, bool TF2 > // Transpose flag of the right-hand side sparse vector
void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t sizePerThread( (~lhs).size() / threads + addon );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a vector to
// a dense vector. Due to the explicit application of the SFINAE principle, this function can
// only be selected by the compiler in case at least one of the two given vector operands is
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, Or< Not< IsSMPAssignable<VT1> >
, Not< IsSMPAssignable<VT2> > > > >::Type
smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due
// to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, IsSMPAssignable<VT1>
, IsSMPAssignable<VT2> > >::Type
smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT1::ElementType );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT2::ElementType );
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpSubAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP multiplication assignment of a dense vector to a
// dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP multiplication assignment
// of a dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side dense vector
, bool TF2 > // Transpose flag of the right-hand side dense vector
void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef IntrinsicTrait<typename VT1::ElementType> IT;
typedef typename SubvectorExprTrait<VT1,aligned>::Type AlignedTarget;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const bool vectorizable( VT1::vectorizable && VT2::vectorizable && IsSame<ET1,ET2>::value );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t equalShare ( (~lhs).size() / threads + addon );
const size_t rest ( equalShare & ( IT::size - 1UL ) );
const size_t sizePerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
if( vectorizable && lhsAligned && rhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
multAssign( target, subvector<aligned>( ~rhs, index, size ) );
}
else if( vectorizable && lhsAligned ) {
AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
multAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
else if( vectorizable && rhsAligned ) {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
multAssign( target, subvector<aligned>( ~rhs, index, size ) );
}
else {
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
multAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP multiplication assignment of a sparse vector to a
// dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be multiplied.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP multiplication assignment
// of a sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side sparse vector
, bool TF2 > // Transpose flag of the right-hand side sparse vector
void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef typename VT1::ElementType ET1;
typedef typename VT2::ElementType ET2;
typedef typename SubvectorExprTrait<VT1,unaligned>::Type UnalignedTarget;
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t sizePerThread( (~lhs).size() / threads + addon );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
multAssign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// vector. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case at least one of the two given vector operands is not
// SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, Or< Not< IsSMPAssignable<VT1> >
, Not< IsSMPAssignable<VT2> > > > >::Type
smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the OpenMP-based SMP multiplication assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline typename EnableIf< And< IsDenseVector<VT1>
, IsSMPAssignable<VT1>
, IsSMPAssignable<VT2> > >::Type
smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT1::ElementType );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename VT2::ElementType );
BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
multAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpMultAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINTS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
csr_matvec.c
|
/*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.4 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "headers.h"
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
int *A_rownnz = hypre_CSRMatrixRownnz(A);
int num_rownnz = hypre_CSRMatrixNumRownnz(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp, tempx;
int i, j, jj;
int m;
double xpar=0.7;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
if (num_rownnz < xpar*(num_rows))
{
/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = y_data[m];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] = tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = y_data[ j*vecstride_y + m*idxstride_y ];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] = tempx;
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp,j) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
else
for ( j=0; j<num_vectors; ++j )
{
temp = y_data[ j*vecstride_y + i*idxstride_y ];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
temp += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] = temp;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
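/*--------------------------------------------------------------------------
 * Illustrative note (sketch only; the values below are assumed): the routine
 * above evaluates y <- alpha*A*x + beta*y in three passes,
 *
 *    y <- (beta/alpha)*y ,   y <- y + A*x ,   y <- alpha*y ,
 *
 * which equals alpha*( (beta/alpha)*y + A*x ) = beta*y + alpha*A*x.
 * For example, with alpha = 2, beta = 3, A = I (2x2), x = (1,1), y = (1,2),
 * the passes give y = (1.5,3), then y = (2.5,4), then y = (5,8), matching
 * alpha*A*x + beta*y = (2,2) + (3,6).
 *--------------------------------------------------------------------------*/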
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
 * This version uses a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvecT( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp;
double *y_data_expand = NULL;
int offset = 0;
#ifdef HYPRE_USING_OPENMP
int my_thread_num = 0;
#endif
int i, j, jv, jj;
int num_threads;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
y_data_expand = hypre_CTAlloc(double, num_threads*y_size);
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j, my_thread_num, offset)
{
my_thread_num = omp_get_thread_num();
offset = y_size*my_thread_num;
#pragma omp for schedule(static)
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
#ifdef HYPRE_USING_OPENMP
/* implied barrier */
#pragma omp for schedule(static)
#endif
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
/*y_data_expand[j*y_size + i] = 0; //zero out for next time */
}
}
#ifdef HYPRE_USING_OPENMP
} /* end parallel region */
#endif
}
else
{
/* MULTIPLE VECTORS NOT THREADED YET */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand);
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
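/*--------------------------------------------------------------------------
 * Illustrative note (sketch only): unlike Matvec, the transpose product
 * scatters into y (several rows i can update the same y[A_j[jj]]), so the
 * threaded branch above gives each thread its own length-y_size slice of
 * y_data_expand, accumulates A^T*x there without write conflicts, and then
 * sums the num_threads partial vectors back into y_data in a second
 * parallel loop. With 2 threads and y_size = 3, y_data_expand holds
 *
 *    [ t0:y0 t0:y1 t0:y2 | t1:y0 t1:y1 t1:y2 ]   and   y[i] += t0:yi + t1:yi
 *--------------------------------------------------------------------------*/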
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec_FF( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y,
int *CF_marker_x,
int *CF_marker_y,
int fpt )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
double temp;
int i, jj;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
}
return ierr;
}
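/*--------------------------------------------------------------------------
 * Illustrative note (sketch only): Matvec_FF restricts the product to a
 * marked sub-problem. A row i is updated only if CF_marker_x[i] == fpt, and
 * within that row only entries whose column index A_j[jj] satisfies
 * CF_marker_y[A_j[jj]] == fpt contribute, so the routine effectively
 * computes y_F <- alpha*A_FF*x_F + beta*y_F and leaves every other entry
 * of y unchanged.
 *--------------------------------------------------------------------------*/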
|
Image.h
|
#include <iostream>
#include <cmath>
#include <omp.h>
//Functions that are integral to the image processing functions
//in the PDImaging python toolbox written by Yngve Mardal Moe.
//Basic image mathematics
void inline multiply_image(double* image, double factor, int y, int x)
{
//Multiplies all elements of :image: by factor
int i;
#pragma omp parallel for
for (i = 0; i < y*x; i++)
{
image[i] *= factor;
}
}
void inline add_image(double* image1, double* image2, int y, int x)
{
//Adds the images :image1: and :image2: together and stores it in :image1:
int i;
#pragma omp parallel for
for (i = 0; i < y*x; i++)
{
image1[i] += image2[i];
}
}
void inline subtract_image_second_from_first(double* image1, double* image2, int y, int x)
{
//Subtracts the elements of :image2: from :image1: and stores it in :image1:
int i;
#pragma omp parallel for
for (i = 0; i < y*x; i++)
{
image1[i] -= image2[i];
}
}
void inline subtract_image_first_from_second(double* image1, double* image2, int y, int x)
{
//Subtracts the elements of :image1: from :image2: and stores it in :image1:
int i;
#pragma omp parallel for
for (i = 0; i < y*x; i++)
{
image1[i] = image2[i] - image1[i];
}
}
void inline set_zero(double* image, int y, int x)
{
//Set all the elements of :image: to zero
int i;
#pragma omp parallel for
for (i = 0; i < y*x; i++)
{
image[i] = 0;
}
}
void inline copy_image(double* image, double* raw, int y, int x)
{
//Copy all elements of :raw: into :image:
int i;
#pragma omp parallel for
for (i = 0; i < y*x; i++)
{
image[i] = raw[i];
}
}
//Merge sort algorithm
void merge(double* first_half, int first_length, double* second_half, int second_length, double* full_array)
{
int l = 0;
int r = 0;
for (int i = 0; i < first_length + second_length; ++i)
{
if (l >= first_length)
{
full_array[i] = second_half[r];
r++;
}
else if (r >= second_length)
{
full_array[i] = first_half[l];
l++;
}
else if (first_half[l] <= second_half[r])
{
full_array[i] = first_half[l];
l++;
}
else
{
full_array[i] = second_half[r];
r++;
}
}
}
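//Worked example (illustrative only; the values are assumed): merging first_half = {1, 4, 7}
//with second_half = {2, 3, 9} fills full_array step by step as {1, 2, 3, 4, 7, 9}; once one
//half is exhausted, the remaining elements of the other half are copied straight through.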
void merge_sort(double* sort_array, int length)
{
if (length < 2)
{
return;
}
else
{
double* temp = new double[length]();
int center = length / 2;
merge_sort(sort_array, center);
merge_sort(sort_array + center, length - center);
merge(sort_array, center, sort_array + center, length - center, temp);
for (int i = 0; i < length; i++)
{
sort_array[i] = temp[i];
}
}
}
//Image statistics
double median(double* image, int y, int x)
{
double* temp_image = new double[y*x]();
copy_image(temp_image, image, y, x);
merge_sort(temp_image, y*x);
double med = ((y*x) % 2 == 1) ? temp_image[(y*x) / 2] : (temp_image[(y*x) / 2] + temp_image[(y*x) / 2 - 1]) / 2.0;
delete[] temp_image;
return med;
}
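//Worked example (illustrative only; the values are assumed): for a 2x2 image {3.0, 1.0, 4.0, 2.0},
//median() sorts a copy to {1.0, 2.0, 3.0, 4.0}; since y*x == 4 is even, the result is the mean of
//the two middle elements, (2.0 + 3.0) / 2.0 = 2.5.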
double sum_squares(double* image, int y, int x)
{
//Computes the square of the 2-norm of :image:
int i;
double norm = 0;
for (i = 0; i < x*y; i++)
{
norm += image[i] * image[i];
}
return norm;
}
double sum_squared_error(double* image, double* raw, int y, int x)
{
//Computes sum{(image[i]-raw[i])^2} for all i
int i;
double norm = 0;
for (i = 0; i < y*x; i++)
{
norm += (image[i] - raw[i]) * (image[i] - raw[i]);
}
return norm;
}
|
GB_binop__ne_uint16.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint16)
// A*D function (colscale): GB (_AxD__ne_uint16)
// D*A function (rowscale): GB (_DxB__ne_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint16)
// C=scalar+B GB (_bind1st__ne_uint16)
// C=scalar+B' GB (_bind1st_tran__ne_uint16)
// C=A+scalar GB (_bind2nd__ne_uint16)
// C=A'+scalar GB (_bind2nd_tran__ne_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT16 || GxB_NO_NE_UINT16)
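//------------------------------------------------------------------------------
// Illustrative sketch (not generated code; the index names are assumed): inside
// the templates included below, a single entry of C is produced from the macros
// above roughly as
//
//      GB_GETA (aij, Ax, pA, A_iso) ;          // uint16_t aij = Ax [pA]
//      GB_GETB (bij, Bx, pB, B_iso) ;          // uint16_t bij = Bx [pB]
//      GB_BINOP (GB_CX (pC), aij, bij, i, j) ; // Cx [pC] = (aij != bij)
//
// so a value pair such as (3, 7) produces the boolean entry true.
//------------------------------------------------------------------------------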
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
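// Illustration (editor's sketch, not part of the generated kernel): for the
// NE_UINT16 operator, bind1st fixes the scalar as the *first* argument and
// bind2nd fixes it as the *second*, so with x = y = 3 and Bx = Ax = {1, 3, 7}
// both kernels produce Cx = {true, false, true}; the distinction only matters
// for non-commutative operators such as MINUS or DIV.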
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
shared_value.c
|
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[]) {
int nthreads, tid, value = 0;
/* Fork a team of threads; nthreads and tid are private per thread,
   while value is shared by the whole team */
#pragma omp parallel private(nthreads, tid) shared(value)
{
/* Obtain thread number */
tid = omp_get_thread_num();
/* Accumulate this thread's id into the shared counter; without the
   atomic directive this update would be a data race */
#pragma omp atomic
value += tid;
printf("Hello World from thread = %d\n", tid);
/* Only master thread does this */
if (tid == 0) {
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
} /* All threads join master thread and disband */
printf("Value: %d\n",value);
}
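/*
 * Build/run sketch (editor's note; the exact compiler invocation is an
 * assumption -- any OpenMP-capable compiler works):
 *
 *   gcc -fopenmp shared_value.c -o shared_value
 *   OMP_NUM_THREADS=4 ./shared_value
 *
 * With the atomic update above, the final "Value" line is the sum of the
 * thread ids (0+1+2+3 = 6 for four threads); the per-thread "Hello" lines
 * may appear in any order.
 */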
|
Sema.h
|
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
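// Usage sketch (editor's illustration; `Map` is a FileNullabilityMap and
// `FID` a FileID, both hypothetical locals):
//
//   FileNullability &Info = Map[FID];
//   if (!Info.SawTypeNullability && Info.PointerLoc.isValid()) {
//     // first un-annotated pointer in this file; a fix-it can be placed at
//     // Info.PointerEndLoc
//   }
//
// Repeated lookups with the same FileID hit the single-element cache and
// skip the DenseMap probe entirely.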
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows the parser to avoid updating the type on hot paths.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients must make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
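// Usage sketch (editor's illustration; `PreferredType` is a hypothetical
// PreferredTypeBuilder owned by the parser):
//
//   PreferredType.enterReturn(S, Tok.getLocation()); // before parsing the
//                                                    // operand of `return`
//   ...
//   QualType Expected = PreferredType.get(Tok.getLocation());
//
// get() returns a null QualType unless it is queried with the exact token
// location the type was recorded for, which keeps stale hints from leaking
// into unrelated expressions.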
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See the comment in the overload below for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
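// Usage sketch (editor's illustration; the label string is arbitrary):
//
//   {
//     Sema::PragmaStackSentinelRAII Sentinel(S, "InternalPragmaState",
//                                            /*ShouldAct=*/IsCXXMethod);
//     // ... parse the method body; any #pragma pack / segment pushes made
//     // inside are popped back to this sentinel on scope exit ...
//   }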
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
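// Usage sketch (editor's illustration; `Pool` is a hypothetical
// sema::DelayedDiagnosticPool set up by the caller):
//
//   auto State = S.DelayedDiagnostics.push(Pool);
//   // ... parse a declaration; access/deprecation diagnostics accumulate in
//   // Pool instead of being emitted immediately ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // the caller then decides whether and where to emit the pooled diagnostics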
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
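// Usage sketch (editor's illustration): temporarily enter a declaration
// context, e.g. while building an implicit member, and restore it on exit:
//
//   {
//     Sema::ContextRAII SavedContext(S, RD);  // RD: some DeclContext
//     // ... CurContext is now RD, delayed diagnostics are suppressed,
//     // and CXXThisTypeOverride is cleared ...
//   } // destructor calls pop(), restoring all three pieces of state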
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
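// Usage sketch (editor's illustration; FD is a hypothetical FunctionDecl
// whose body is being synthesized):
//
//   {
//     SynthesizedFunctionScope Scope(S, FD);
//     Scope.addContextNote(CurrentLocation);  // optional context note
//     // ... build the body of FD ...
//   } // scope exit pops the function scope and the evaluation context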
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before they are declared. Rare; such an identifier may
/// alias another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
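// Examples of where each context arises (editor's summary of the cases above):
//   Unevaluated                - operand of sizeof(e) or noexcept(e)
//   UnevaluatedList            - braced-init-list inside an unevaluated operand
//   DiscardedStatement         - the not-taken branch of `if constexpr`
//   UnevaluatedAbstract        - SIZE operand in MS-style inline assembly
//   ConstantEvaluated          - a case label, array bound, or template argument
//   PotentiallyEvaluated       - an ordinary expression statement
//   PotentiallyEvaluatedIfUsed - a default function argument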
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context that we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used but undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
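// Usage sketch (editor's illustration): wrap a deeply recursive step so it
// runs with extra stack if the current stack is nearly exhausted:
//
//   S.runWithSufficientStackSpace(Loc, [&] {
//     Result = instantiateDeeply(...);  // hypothetical recursive call
//   });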
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
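// Usage sketch (editor's illustration; the diagnostic ID is arbitrary):
//
//   S.Diag(D->getLocation(), diag::err_expected) << "identifier";
//
// operator<< forwards to the underlying DiagnosticBuilder, and the diagnostic
// is emitted when the temporary SemaDiagnosticBuilder is destroyed, together
// with any template-instantiation notes.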
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
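// Illustrative sketch of a BuildFunctionType call, rebuilding a prototype
// whose return and parameter types have just been instantiated. 'Proto',
// 'Loc', and 'Name' are assumed inputs for exposition; EPI is copied from
// the original prototype as the documentation above suggests.
//
//   const FunctionProtoType *Proto = /* original prototype */;
//   SmallVector<QualType, 4> ParamTypes(Proto->param_types().begin(),
//                                       Proto->param_types().end());
//   QualType NewTy = BuildFunctionType(Proto->getReturnType(), ParamTypes,
//                                      Loc, Name, Proto->getExtProtoInfo());
//   // A null result means an error was already diagnosed (see above).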
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
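// Note on the class above: BoundTypeDiagnoser captures a diagnostic ID plus
// extra %-arguments by reference and replays them, via getPrintable(), into
// the diagnostic builder before streaming the offending type. It is the glue
// behind the variadic RequireCompleteType / RequireLiteralType overloads
// declared below.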
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether the
/// address of each is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
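// Illustrative sketch: the variadic overload above forwards its trailing
// arguments into a BoundTypeDiagnoser, so extra %-arguments of the chosen
// diagnostic can be supplied inline. The diagnostic ID and 'DRE' below are
// placeholders for exposition only.
//
//   if (RequireCompleteType(DRE->getLocation(), DRE->getType(),
//                           diag::err_some_incomplete_type, // placeholder ID
//                           DRE->getSourceRange()))
//     return ExprError();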
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
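// Illustrative parser-side sketch of consuming a NameClassification. The
// surrounding parser state ('getCurScope()', 'SS', 'Name', 'NameLoc', 'Next')
// is assumed for exposition; 'Actions' is the parser's Sema reference.
//
//   Sema::NameClassification C =
//       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
//                            /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case Sema::NC_Type:       /* continue parsing a type-specifier */  break;
//   case Sema::NC_Expression: /* continue parsing an expression */     break;
//   case Sema::NC_Error:      /* error already diagnosed */            break;
//   default: /* templates, keywords, nested-name-specifiers, ... */    break;
//   }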
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
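// Illustrative example (hypothetical source): for an expression such as
//   f < a > (b)
// where 'f' resolves to a non-template function or variable, 'f' is a
// DeclRefExpr with no explicit template arguments, so the predicate above
// returns true and the parser can call diagnoseExprIntendedAsTemplateName
// to explain that 'f' was probably intended to name a template.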
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, this actually parses the body and rejects / errors out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
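// Worked example of the priority arithmetic described above (the values are
// the enumerators; the combination itself is hypothetical): an attribute that
// was both applied via '#pragma clang attribute' and inferred from another
// platform would carry priority AP_PragmaClangAttribute +
// AP_InferredFromOtherPlatform == 1 + 2 == 3, so an explicitly written
// attribute (AP_Explicit == 0) for the same platform takes precedence.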
/// Attribute merging methods. Each returns the newly merged attribute if one
/// was added, and nullptr otherwise.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
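// Illustrative sketch of acting on an OverloadKind; 'NewFD' and 'Previous'
// are assumed to come from declaration processing and name lookup.
//
//   NamedDecl *OldDecl = nullptr;
//   switch (CheckOverload(S, NewFD, Previous, OldDecl,
//                         /*IsForUsingDecl=*/false)) {
//   case Ovl_Overload:    /* NewFD is a genuinely new overload */     break;
//   case Ovl_Match:       /* NewFD redeclares OldDecl */              break;
//   case Ovl_NonFunction: /* NewFD conflicts with a non-function */   break;
//   }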
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
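// Illustrative sketch (assumed caller): copy-initializing a variable from a
// parsed initializer. InitializedEntity::InitializeVariable is assumed from
// the initialization machinery; it is not declared in this excerpt.
//
//   InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
//   ExprResult Res = PerformCopyInitialization(Entity, EqualLoc, InitExpr);
//   if (Res.isInvalid())
//     return;                 // diagnostics already emitted
//   VD->setInit(Res.get());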
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
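// Illustrative sketch: checking a case-label value as a converted constant
// expression of the switch condition's type. 'CondType' and 'CaseExpr' are
// assumed inputs for exposition.
//
//   llvm::APSInt Val;
//   ExprResult Converted =
//       CheckConvertedConstantExpression(CaseExpr, CondType, Val,
//                                        CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return;                 // not a usable constant; already diagnosed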
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
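// Illustrative sketch of a concrete diagnoser handed to
// PerformContextualImplicitConversion. The diagnostic ID is a placeholder;
// only one overridden hook is shown, and the remaining pure-virtual
// notes/diagnostics would be implemented the same way.
//
//   struct SizeDiagnoser : ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder_not_integral) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     //     diagnoseAmbiguous, noteAmbiguous, diagnoseConversion ...
//   } Diagnoser;
//   ExprResult R = PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);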
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template candidates
// identified by the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
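// For example (illustrative), given the constant condition
//   sizeof(T) >= 4 && alignof(T) == 8
// instantiated with T = int on a typical target, the failing sub-condition
// that would be singled out is 'alignof(T) == 8'.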
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
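// Illustrative sketch (user-level C, hypothetical declarations): tag names
// and ordinary names live in separate spaces, which is one reason lookup is
// parameterized by LookupNameKind.
//
//   struct stat { int st_mode; };                  // found by LookupTagName
//   int stat(const char *path, struct stat *buf);  // found by LookupOrdinaryName
//
// A use of 'struct stat' performs tag name lookup and finds the type, while a
// call such as 'stat(path, &sb)' performs ordinary name lookup and finds the
// function.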
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
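// Illustrative user-level C++ sketch of the declarations that the
// LiteralOperatorLookupResult values above distinguish (all suffix names are
// hypothetical):
//
//   // LOLR_Cooked: receives the converted value of the literal.
//   constexpr long double operator""_deg(long double v) {
//     return v * 3.141592653589793L / 180.0L;
//   }
//   // LOLR_Raw: receives the spelling of a numeric literal as a string.
//   constexpr int operator""_digits(const char *spelling) {
//     int n = 0;
//     while (spelling[n]) ++n;
//     return n;
//   }
//   // LOLR_Template: numeric literal operator template; the spelling is
//   // passed as a non-type template parameter pack of characters.
//   template <char... Cs> constexpr unsigned operator""_count() {
//     return sizeof...(Cs);
//   }
//
//   static_assert(12345_digits == 5, "the raw form sees the spelling \"12345\"");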
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate, and returning a new Expr if all the
/// typos were successfully corrected, or ExprError if one or more typos could
/// not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
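// Hypothetical usage sketch of the Filter callback accepted by
// CorrectDelayedTyposInExpr above (illustrative only, not an existing call
// site): reject rebuilt expressions of void type so that other combinations
// of corrections keep being tried.
//
//   ExprResult Res = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
//         if (Candidate->getType()->isVoidType())
//           return ExprError();
//         return Candidate;
//       });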
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar that
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must not have a user-declared setter or
/// getter without the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in
/// a category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method that best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
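// Illustrative Objective-C sketch ('getObject' and 'doWork' are
// hypothetical): when a message is sent to a receiver of type 'id', the class
// is unknown, so the selector is resolved against the global method pool
// populated by the functions above.
//
//   id obj = getObject();
//   [obj doWork];  // 'doWork' is looked up among all known instance methods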
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
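// For reference, a full-expression boundary is where temporaries are
// destroyed; e.g. in the statement below (illustrative; 'takesStringRef' is
// hypothetical), the std::string temporary lives until the ';' that ends the
// full expression:
//
//   takesStringRef(std::string("tmp"));  // temporary destroyed at the ';'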
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
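// Minimal usage sketch ('SemaRef' is assumed to be a Sema &; illustrative
// only):
//
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef);
//     // ... act on the statements of the compound statement ...
//   } // ActOnFinishOfCompoundStmt() runs when BodyScope is destroyed.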
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
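// For reference, a range-based for statement behaves roughly as if rewritten
// to the classic form sketched below (per C++ [stmt.ranged]); this is only an
// illustration of what the *CXXForRangeStmt entry points assemble, not the
// exact AST they build ('cont' and 'use' are hypothetical):
//
//   for (auto &x : cont) use(x);
//   // approximately:
//   {
//     auto &&__range = cont;
//     auto __begin = __range.begin();  // or begin(__range) found by ADL
//     auto __end = __range.end();      // or end(__range) found by ADL
//     for (; __begin != __end; ++__begin) {
//       auto &x = *__begin;
//       use(x);
//     }
//   }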
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
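// Illustrative user-level sketch of copy-elision candidates ('Widget' is a
// hypothetical type):
//
//   Widget make() {
//     Widget w;
//     return w;   // 'w' is an NRVO / copy-elision candidate
//   }
//   Widget forward(Widget p) {
//     // With CES_AllowParameters, 'p' is still considered for the
//     // implicit-move treatment, even though the standard never permits
//     // eliding the copy of a parameter.
//     return p;
//   }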
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If the given declaration is a file-scoped declaration that should be
/// warned about when unused, keep track of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
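// These roughly correspond to the C++ lambda capture forms (blocks capture
// implicitly as well); illustrative user-level sketch:
//
//   int n = 0;
//   auto a = [=]  { return n; };    // TryCapture_Implicit (capture-default)
//   auto b = [n]  { return n; };    // TryCapture_ExplicitByVal
//   auto c = [&n] { return ++n; };  // TryCapture_ExplicitByRef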
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
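// For example (illustrative), a diagnostic routed through DiagRuntimeBehavior
// for the statement below may be suppressed once reachability analysis of the
// parsed body shows the statement is unreachable:
//
//   void g() {
//     if (0)
//       (void)(1 / 0);  // runtime-behavior warning in unreachable code
//   }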
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
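// Illustrative sketch of the recovery described above ('Buf', 'b', and 'f'
// are hypothetical): when '.' is used on a pointer, the member access can be
// retried with '->' to see whether that fixes the error.
//
//   struct Buf { int size(); };
//   int f(Buf *b) { return b.size(); }  // typo: should be 'b->size()'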
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
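// Illustrative sketch (hypothetical example, not in the original header): for
//   __builtin_offsetof(struct S, a.b[2])
// the parser hands BuildBuiltinOffsetOf three components: an identifier
// component for 'a' and one for 'b' (isBrackets == false, U.IdentInfo set),
// followed by a bracket component for the index expression '2'
// (isBrackets == true, U.E set).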
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
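// Illustrative mapping (sketch, not in the original header) for the Microsoft
// __if_exists extension handled below:
//
//   __if_exists(N::foo) { /* tokens parsed only when N::foo is found */ }
//
// yields IER_Exists when lookup finds N::foo, IER_DoesNotExist when it does
// not, and IER_Dependent inside a template where N::foo depends on a
// template parameter.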
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
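// Minimal usage sketch for ImplicitExceptionSpecification (illustrative only;
// 'SemaRef', 'Loc', and the SubobjectMembers collection are assumptions, not
// part of this header):
//
//   ImplicitExceptionSpecification Spec(SemaRef);
//   for (const CXXMethodDecl *M : SubobjectMembers)  // ops the member invokes
//     Spec.CalledDecl(Loc, M);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();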
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast expressions.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
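// Minimal usage sketch for CXXThisScopeRAII (illustrative; 'SemaRef', 'RD',
// and 'ThisQuals' are assumed to be in scope):
//
//   {
//     CXXThisScopeRAII ThisScope(SemaRef, RD, ThisQuals);
//     // ... check an expression that may mention 'this' ...
//   } // the previous CXXThisTypeOverride is restored here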
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns 'true' on failure, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
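// Illustrative mapping (sketch, not in the original header): for a plain
// 'new T' the allocation function is looked up with AFS_Both (a class-scope
// operator new is preferred over the global one), while '::new T' forces
// AFS_Global for the allocation function.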
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
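// Illustrative sketch (not in the original header): while parsing 'std::vector',
// the parser packages the identifier 'std', its location, and the location of
// the following '::' into a NestedNameSpecInfo and passes it to
// ActOnCXXNestedNameSpecifier below to extend the CXXScopeSpec.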
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
TypeSourceInfo *MethodType, SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Optional<std::pair<unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
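// Illustrative note (not in the original header; 'make' is a hypothetical
// callee): for an init-capture written as '[x = make()]' the parser reports
// LambdaCaptureInitKind::CopyInit, so the call above passes
// DirectInit == false; '[x(make())]' and '[x{make()}]' arrive as direct/list
// initialization and pass DirectInit == true.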
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
  /// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
  /// std::container::iterator.
  /// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
  /// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
  /// function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
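  /// A minimal usage sketch for the variadic overload above; the diagnostic ID
  /// and the types shown are hypothetical and for illustration only:
  ///
  /// \code
  ///   if (RequireNonAbstractType(Loc, FieldType,
  ///                              diag::err_abstract_type_in_decl, // hypothetical
  ///                              AbstractFieldType))
  ///     return true;
  /// \endcode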
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
ConceptDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
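  /// For illustration, the source forms these kinds correspond to (sketch only):
  ///
  /// \code
  ///   template<typename T, int N> void f(T (&)[N]);
  ///   int A[3];
  ///   f<int, 3>(A);  // arguments written explicitly        -> CTAK_Specified
  ///   f(A);          // T and N deduced; N from array bound -> CTAK_Deduced and
  ///                  //                                        CTAK_DeducedFromArrayBound
  /// \endcode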
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
  /// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
    /// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
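  /// For illustration, a case the routine above would diagnose (sketch only;
  /// 'g' stands for any visible overload set):
  ///
  /// \code
  ///   template<typename ...Ts> void f(Ts ...args) {
  ///     g(args);      // error: 'args' is an unexpanded parameter pack
  ///     g(args...);   // OK: pack expansion
  ///   }
  /// \endcode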
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
  /// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
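  /// For illustration, the "different lengths" failure described above
  /// (sketch only):
  ///
  /// \code
  ///   template<typename...> struct Tuple {};
  ///   template<typename, typename> struct Zip;
  ///   template<typename ...As, typename ...Bs>
  ///   struct Zip<Tuple<As...>, Tuple<Bs...>> {
  ///     using type = Tuple<Tuple<As, Bs>...>;  // 'As' and 'Bs' expand together
  ///   };
  ///   // Zip<Tuple<int, float>, Tuple<char>>::type is ill-formed: 'As' has two
  ///   // elements but 'Bs' has one, so the packs cannot be expanded together.
  /// \endcode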
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
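  /// For illustration, two of the failure modes above in source form
  /// (sketch only):
  ///
  /// \code
  ///   template<typename T> void f(T, T);
  ///   f(1, 2.0);    // TDK_Inconsistent: T deduced as both 'int' and 'double'
  ///   f(1, 2, 3);   // TDK_TooManyArguments: three call arguments, two parameters
  /// \endcode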
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
  /// A function argument from which we performed template argument
  /// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
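  /// For illustration (sketch only):
  ///
  /// \code
  ///   auto X = 42;         // DAR_Succeeded: X deduced as 'int'
  ///   auto Y = {1, 2.0};   // DAR_Failed: the initializer-list elements have
  ///                        // conflicting types, so no type can be deduced
  /// \endcode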
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
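  /// For illustration, the implicit guides declared here are what allow class
  /// template argument deduction from a constructor (sketch only):
  ///
  /// \code
  ///   template<typename T> struct Box {
  ///     T Value;
  ///     Box(T V) : Value(V) {}
  ///   };
  ///   Box B(42);   // implicit guide Box(T) -> Box<T>; B has type Box<int>
  /// \endcode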
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
      /// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
      /// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
    /// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
  /// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
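  /// A minimal usage sketch from within a \c Sema member function; the
  /// surrounding loop and \c NumArgs are illustrative only:
  ///
  /// \code
  ///   for (unsigned I = 0; I != NumArgs; ++I) {
  ///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
  ///     // substitute the pattern using the I'th element of each pack
  ///   }
  /// \endcode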
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
  /// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
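  ///
  /// A typical (illustrative) use from within \c Sema:
  ///
  /// \code
  ///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
  ///   if (Inst.isInvalid())
  ///     return;
  /// \endcode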
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
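  ///
  /// Typical use (illustrative only):
  /// \code
  ///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
  ///     // We are in a SFINAE context; *Info, if non-null, can record
  ///     // diagnostics suppressed for the surrounding deduction.
  ///   }
  /// \endcode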
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
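  ///
  /// A minimal usage sketch (illustrative only):
  /// \code
  ///   SFINAETrap Trap(*this);
  ///   // ... perform provisional checking whose errors must not escape ...
  ///   if (Trap.hasErrorOccurred())
  ///     return ExprError(); // treat the failure as a substitution failure
  /// \endcode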
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
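  ///
  /// A minimal usage sketch (illustrative only):
  /// \code
  ///   {
  ///     TentativeAnalysisScope Tentative(*this);
  ///     // ... speculatively analyze a construct; diagnostics and typo
  ///     // corrections in the immediate context are suppressed ...
  ///   }
  /// \endcode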
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
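  /// RAII scope that swaps out the global queues of pending implicit
  /// instantiations and vtable uses so that work queued while it is active
  /// can be performed eagerly (via \c perform()) before the saved queues are
  /// restored.
  ///
  /// A minimal usage sketch (illustrative only; \c ShouldPerformEagerly is a
  /// placeholder flag):
  /// \code
  ///   GlobalEagerInstantiationScope GlobalInstantiations(
  ///       *this, /*Enabled=*/ShouldPerformEagerly);
  ///   // ... instantiate a definition, possibly queueing more work ...
  ///   GlobalInstantiations.perform();
  /// \endcode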
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
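  /// RAII scope that isolates \c PendingLocalImplicitInstantiations so that
  /// local instantiations queued inside it can be performed (via
  /// \c perform()) within the current local scope.
  ///
  /// A minimal usage sketch (illustrative only):
  /// \code
  ///   LocalEagerInstantiationScope LocalInstantiations(*this);
  ///   // ... instantiate a body that may reference function-local entities ...
  ///   LocalInstantiations.perform();
  /// \endcode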
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
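  ///
  /// A minimal usage sketch (illustrative only; \c hasInterestingInfo and
  /// \c getInfo are hypothetical helpers, and \c EPI stands for a
  /// \c FunctionProtoType::ExtProtoInfo being filled in):
  /// \code
  ///   ExtParameterInfoBuilder ExtParamInfos;
  ///   for (unsigned I = 0, N = Params.size(); I != N; ++I)
  ///     if (hasInterestingInfo(Params[I]))
  ///       ExtParamInfos.set(I, getInfo(Params[I]));
  ///   EPI.ExtParameterInfos = ExtParamInfos.getPointerOrNull(Params.size());
  /// \endcode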
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
    /// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
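  ///
  /// A minimal call sketch (illustrative only; \c ArgExprs is a placeholder
  /// argument list):
  /// \code
  ///   SmallVector<Expr *, 8> ConvertedArgs;
  ///   if (SubstExprs(ArgExprs, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
  ///     return ExprError(); // substitution failed
  /// \endcode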
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
  ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
  /// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
  /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
  /// Check if a function declaration \p FD is associated with any
  /// extensions present in OpenCLDeclExtMap and, if so, return the
  /// names of those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
  /// Check if a function type \p FT is associated with any
  /// extensions present in OpenCLTypeExtMap and, if so, return the
  /// names of those extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
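  ///
  /// For example (illustrative only):
  /// \code
  ///   setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");
  /// \endcode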
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
  /// Checks whether a type or a declaration is disabled because the OpenCL
  /// extension it requires is disabled, and emits diagnostics if so.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
  /// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
  /// Return true if the provided declaration \a D should be captured by
/// reference.
  /// \param Level Relative level of the nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of the nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of the nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
  /// mark the loop control variable used in \p Init for loop initialization
  /// as private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
  /// Called at the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
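// Illustrative sketch (not part of the original header): the
// ActOnOpenMP*Clause entry points all follow the same pattern of taking the
// parsed payload plus the clause's source locations. For example, assuming
// 'S', 'Cond' and the locations come from the parser:
//
//   OMPClause *If = S.ActOnOpenMPIfClause(OMPD_parallel, Cond, StartLoc,
//                                         LParenLoc, NameModifierLoc,
//                                         ColonLoc, EndLoc);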
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
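// Illustrative sketch (not part of the original header): a typical use is to
// coerce an expression to a specific type with an implicit cast node,
// assuming 'S', 'E' and 'Ctx' (a Sema, an Expr* and the ASTContext) are in
// scope:
//
//   ExprResult Boolean =
//       S.ImpCastExprToType(E, Ctx.BoolTy, CK_IntegralToBoolean);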
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
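// Illustrative sketch (not part of the original header): the usual pattern
// is to run the constraint check and then let DiagnoseAssignmentResult
// report anything other than Compatible. 'S', 'LHSType', 'RHS', 'Loc' and an
// AssignmentAction 'Action' are assumed to be in scope:
//
//   Sema::AssignConvertType ConvTy =
//       S.CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid() ||
//       S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
//                                  RHS.get()->getType(), RHS.get(), Action))
//     return ExprError();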
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
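// Illustrative sketch (not part of the original header), assuming 'S', 'Arg'
// and 'ParamTy' are in scope and AA_Passing is the AssignmentAction used for
// argument passing:
//
//   ExprResult Converted =
//       S.PerformImplicitConversion(Arg, ParamTy, Sema::AA_Passing);
//   if (Converted.isInvalid())
//     return ExprError();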
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
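// Illustrative sketch (not part of the original header), assuming 'S',
// 'CurScope', 'Loc' and 'CondExpr' are in scope:
//
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       CurScope, Loc, CondExpr, Sema::ConditionKind::Boolean);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // the condition folded to *Known at compile time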
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, which is invalid if there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Perform the conversion of the expression to
/// bool; the result is invalid if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. The result is invalid on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
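// Illustrative sketch (not part of the original header), assuming 'S' and
// 'E' are in scope:
//
//   llvm::APSInt Value;
//   ExprResult Folded = S.VerifyIntegerConstantExpression(E, &Value);
//   if (Folded.isInvalid())
//     return ExprError(); // diagnostics have already been emitted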
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// The result is invalid on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
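// Illustrative sketch (not part of the original header), assuming 'S' and
// 'FD' are in scope:
//
//   switch (S.IdentifyCUDATarget(FD)) {
//   case Sema::CFT_Device:
//   case Sema::CFT_Global:
//     /* FD is emitted for the device */
//     break;
//   default:
//     break;
//   }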
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. The returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container that
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed to
/// a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
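// Illustrative examples (added for clarity, not from the original header):
// TooManyArguments(/*NumParams=*/2, /*NumArgs=*/3) is true, and
// TooManyArguments(2, 2, /*PartialOverloading=*/true) is also true, because
// the in-progress argument after the trailing comma is counted as well.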
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it from the set. This is
/// used when we do not want to diagnose such misaligned access (e.g. in
/// conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
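// Illustrative usage sketch (added for clarity, not part of the original
// header): the parser typically creates this guard on the stack around
// parsing of an operand, e.g.
//   EnterExpressionEvaluationContext Unevaluated(
//       Actions, Sema::ExpressionEvaluationContext::Unevaluated);
// so the pushed context is popped automatically when the guard goes out of
// scope, even on early returns.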
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
streaming.c
|
#include "streaming.h"
#include "helper.h"
#include "LBDefinitions.h"
#include "computeCellValues.h"
#include <omp.h>
/* doStremingCell: performs the streaming step for one cell; each cell pulls the incoming distributions from its neighboring cells */
void doStremingCell(float * collideField, float * streamField, int * flagField, float * massField, float * fractionField, int * node, float * el, int * n, int isInterface, int isFluid, float exchange) {
int i, flag;
int source_node[3];
float fi_nb, se;
for (i = 0; i < Q; i++) {
/* neighboring cell from which particles are obtained */
source_node[0] = node[0] - LATTICEVELOCITIES[i][0];
source_node[1] = node[1] - LATTICEVELOCITIES[i][1];
source_node[2] = node[2] - LATTICEVELOCITIES[i][2];
/* Distribution value streamed into this cell from the neighbor in direction i */
fi_nb = *getEl(collideField, source_node, i, n);
if (isFluid) {
*(el + i) = fi_nb;
}
if (isInterface) {
/* Obtain the flag of the neighbor cell */
flag = *getFlag(flagField, source_node, n);
if (flag == GAS) {
float velocity[3], feq[Q], *fluidCell, rho_ref = 1;
/* get pointer to the fluid cell */
fluidCell = getEl(collideField, node, 0, n);
/* compute velocity of the fluid cell */
computeVelocity(fluidCell, &rho_ref, velocity);
/* compute f-equilibrium of the fluid cell */
computeFeq(&rho_ref, velocity, feq);
/* reconstruct the distribution coming from the gas neighbor (free-surface boundary) */
*(el + i) = feq[Q - i - 1] + feq[i] - fluidCell[Q - 1 - i];
} else {
*(el + i) = fi_nb;
}
/* If the neighbor cell is fluid or interface then update the mass value */
if (flag == FLUID || flag == INTERFACE) {
se = fi_nb - *getEl(collideField, source_node, Q - 1 - i, n);
*getMass(massField, node, n) += exchange * se * (*getFraction(fractionField, node, n) + *getFraction(fractionField, source_node, n)) * 0.5;
}
}
}
}
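/* Note (added for clarity): the index Q - 1 - i is used above as the opposite
lattice direction of i, which assumes the LATTICEVELOCITIES ordering in
LBDefinitions.h is symmetric, i.e. LATTICEVELOCITIES[Q - 1 - i] == -LATTICEVELOCITIES[i]. */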
void doStreaming(float * collideField, float * streamField, int * flagField, float * massField, float * fractionField, int * length, int n_threads, float exchange){
int x, y, z, *flag, isFluid, isInterface;
int node[3];
float * el;
int n[3] = { length[0] + 2, length[1] + 2, length[2] + 2 };
/* Loop for inner cells */
#pragma omp parallel for schedule(dynamic) private(x, node, isFluid, flag, isInterface, el) num_threads(n_threads) collapse(2)
for (z = 1; z <= length[2]; z++) {
for (y = 1; y <= length[1]; y++) {
node[2] = z;
node[1] = y;
for (x = 1; x <= length[0]; x++) {
/* Obtain the pointer and the flag for each element */
node[0] = x;
el = getEl(streamField, node, 0, n);
flag = getFlag(flagField, node, n);
isFluid = *flag == FLUID;
isInterface = *flag == INTERFACE;
/* Make streaming just for fluid and interface cells */
if (isFluid || isInterface) {
doStremingCell(collideField, streamField, flagField, massField, fractionField, node, el, n, isInterface, isFluid, exchange);
}
}
}
}
}
|
client_utils.h
|
// Copyright (c) 2020 - present Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef CLIENT_UTILS_H
#define CLIENT_UTILS_H
#include <algorithm>
#include <complex>
#include <iostream>
#include <mutex>
#include <numeric>
#include <omp.h>
#include <random>
#include <tuple>
#include <vector>
#include "../shared/printbuffer.h"
#include "rocfft/rocfft.h"
static const size_t ONE_GiB = 1 << 30;
// Determine the size of the data type given the precision and type.
template <typename Tsize>
inline Tsize var_size(const rocfft_precision precision, const rocfft_array_type type)
{
size_t var_size = 0;
switch(precision)
{
case rocfft_precision_single:
var_size = sizeof(float);
break;
case rocfft_precision_double:
var_size = sizeof(double);
break;
}
switch(type)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
var_size *= 2;
break;
default:
break;
}
return var_size;
}
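// Illustrative example (added for clarity, not part of the original header):
// var_size<size_t>(rocfft_precision_single, rocfft_array_type_complex_interleaved)
// is 2 * sizeof(float) == 8 on typical platforms, while a double-precision
// real value gives sizeof(double) == 8.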
// Container class for test parameters.
class fft_params
{
public:
// All parameters are row-major.
std::vector<size_t> length;
std::vector<size_t> istride;
std::vector<size_t> ostride;
size_t nbatch = 1;
rocfft_precision precision = rocfft_precision_double;
rocfft_transform_type transform_type = rocfft_transform_type_complex_forward;
rocfft_result_placement placement = rocfft_placement_inplace;
size_t idist = 0;
size_t odist = 0;
rocfft_array_type itype = rocfft_array_type_unset;
rocfft_array_type otype = rocfft_array_type_unset;
std::vector<size_t> ioffset = {0, 0};
std::vector<size_t> ooffset = {0, 0};
std::vector<size_t> isize;
std::vector<size_t> osize;
// run testing load/store callbacks
bool run_callbacks = false;
static constexpr double load_cb_scalar = 0.457813941;
static constexpr double store_cb_scalar = 0.391504938;
// Given an array type, return the name as a string.
static std::string array_type_name(const rocfft_array_type type)
{
switch(type)
{
case rocfft_array_type_complex_interleaved:
return "rocfft_array_type_complex_interleaved";
case rocfft_array_type_complex_planar:
return "rocfft_array_type_complex_planar";
case rocfft_array_type_real:
return "rocfft_array_type_real";
case rocfft_array_type_hermitian_interleaved:
return "rocfft_array_type_hermitian_interleaved";
case rocfft_array_type_hermitian_planar:
return "rocfft_array_type_hermitian_planar";
case rocfft_array_type_unset:
return "rocfft_array_type_unset";
}
return "";
}
std::string transform_type_name() const
{
switch(transform_type)
{
case rocfft_transform_type_complex_forward:
return "rocfft_transform_type_complex_forward";
case rocfft_transform_type_complex_inverse:
return "rocfft_transform_type_complex_inverse";
case rocfft_transform_type_real_forward:
return "rocfft_transform_type_real_forward";
case rocfft_transform_type_real_inverse:
return "rocfft_transform_type_real_inverse";
}
}
return "";
}
// Convert to string for output.
std::string str(const std::string& separator = ", ") const
{
std::stringstream ss;
ss << "length:";
for(auto i : length)
ss << " " << i;
ss << separator;
ss << "istride:";
for(auto i : istride)
ss << " " << i;
ss << separator;
ss << "idist: " << idist << separator;
ss << "ostride:";
for(auto i : ostride)
ss << " " << i;
ss << separator;
ss << "odist: " << odist << separator;
ss << "batch: " << nbatch << separator;
ss << "isize:";
for(auto i : isize)
ss << " " << i;
ss << separator;
ss << "osize:";
for(auto i : osize)
ss << " " << i;
ss << separator;
ss << "ioffset:";
for(auto i : ioffset)
ss << " " << i;
ss << separator;
ss << "ooffset:";
for(auto i : ooffset)
ss << " " << i;
ss << separator;
if(placement == rocfft_placement_inplace)
ss << "in-place";
else
ss << "out-of-place";
ss << separator;
ss << "transform_type: " << transform_type_name() << separator;
ss << array_type_name(itype) << " -> " << array_type_name(otype) << separator;
if(precision == rocfft_precision_single)
ss << "single-precision";
else
ss << "double-precision";
ss << separator;
ss << "ilength:";
for(const auto i : ilength())
ss << " " << i;
ss << separator;
ss << "olength:";
for(const auto i : olength())
ss << " " << i;
ss << separator;
ss << "ibuffer_size:";
for(const auto i : ibuffer_sizes())
ss << " " << i;
ss << separator;
ss << "obuffer_size:";
for(const auto i : obuffer_sizes())
ss << " " << i;
ss << separator;
return ss.str();
}
// Stream output operator (for gtest, etc).
friend std::ostream& operator<<(std::ostream& stream, const fft_params& params)
{
stream << params.str();
return stream;
}
// Dimension of the transform.
size_t dim() const
{
return length.size();
}
std::vector<size_t> ilength() const
{
auto ilength = length;
if(transform_type == rocfft_transform_type_real_inverse)
ilength[dim() - 1] = ilength[dim() - 1] / 2 + 1;
return ilength;
}
std::vector<size_t> olength() const
{
auto olength = length;
if(transform_type == rocfft_transform_type_real_forward)
olength[dim() - 1] = olength[dim() - 1] / 2 + 1;
return olength;
}
static size_t nbuffer(const rocfft_array_type type)
{
switch(type)
{
case rocfft_array_type_real:
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
return 1;
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
return 2;
case rocfft_array_type_unset:
return 0;
}
}
// Number of input buffers
size_t nibuffer() const
{
return nbuffer(itype);
}
// Number of output buffers
size_t nobuffer() const
{
return nbuffer(otype);
}
// Compute the farthest point from the original pointer.
size_t compute_ptrdiff(const std::vector<size_t>& length,
const std::vector<size_t>& stride,
const size_t nbatch,
const size_t dist) const
{
size_t val = 0;
if(!length.empty())
{
val = 1;
for(int i = 0; i < length.size(); ++i)
{
val += (length[i] - 1) * stride[i];
}
val += (nbatch - 1) * dist;
}
return val;
}
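// Worked example (illustrative, not part of the original header): for
// length = {4, 4}, stride = {4, 1}, nbatch = 2 and dist = 16 the result is
// 1 + 3 * 4 + 3 * 1 + 1 * 16 = 32, i.e. the number of elements the buffer
// must provide (excluding any offset).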
void set_iotypes()
{
if(itype == rocfft_array_type_unset)
{
switch(transform_type)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
itype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
itype = rocfft_array_type_real;
break;
case rocfft_transform_type_real_inverse:
itype = rocfft_array_type_hermitian_interleaved;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
if(otype == rocfft_array_type_unset)
{
switch(transform_type)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
otype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
otype = rocfft_array_type_hermitian_interleaved;
break;
case rocfft_transform_type_real_inverse:
otype = rocfft_array_type_real;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
}
// Check that the input and output types are consistent.
bool check_iotypes() const
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
case rocfft_array_type_real:
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
switch(otype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
case rocfft_array_type_real:
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
// Check that format choices are supported
if(transform_type != rocfft_transform_type_real_forward
&& transform_type != rocfft_transform_type_real_inverse)
{
if(placement == rocfft_placement_inplace && itype != otype)
{
throw std::runtime_error(
"In-place transforms must have identical input and output types");
}
}
bool okformat = true;
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
okformat = (otype == rocfft_array_type_complex_interleaved
|| otype == rocfft_array_type_complex_planar);
break;
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
okformat = otype == rocfft_array_type_real;
break;
case rocfft_array_type_real:
okformat = (otype == rocfft_array_type_hermitian_interleaved
|| otype == rocfft_array_type_hermitian_planar);
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
return okformat;
}
// Given a length vector, set the rest of the strides.
// The optional argument stride0 sets the stride for the contiguous dimension.
// The optional rcpadding argument sets the stride correctly for in-place
// multi-dimensional real/complex transforms.
// Format is row-major.
template <typename T1>
std::vector<T1> compute_stride(const std::vector<T1>& length,
const std::vector<size_t>& stride0 = std::vector<size_t>(),
const bool rcpadding = false) const
{
const int dim = length.size();
std::vector<T1> stride(dim);
int dimoffset = 0;
if(stride0.size() == 0)
{
// Set the contiguous stride:
stride[dim - 1] = 1;
dimoffset = 1;
}
else
{
// Copy the input values to the end of the stride array:
for(int i = 0; i < stride0.size(); ++i)
{
stride[dim - stride0.size() + i] = stride0[i];
}
}
if(stride0.size() < dim)
{
// Compute any remaining values via recursion.
for(int i = dim - dimoffset - stride0.size(); i-- > 0;)
{
auto lengthip1 = length[i + 1];
if(rcpadding && i == dim - 2)
{
lengthip1 = 2 * (lengthip1 / 2 + 1);
}
stride[i] = stride[i + 1] * lengthip1;
}
}
return stride;
}
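// Worked example (illustrative, not part of the original header): for
// length = {2, 3, 4} with an empty stride0 this yields row-major strides
// {12, 4, 1}; with rcpadding the innermost length is padded to
// 2 * (4 / 2 + 1) = 6, giving {18, 6, 1}.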
void compute_istride()
{
istride = compute_stride(ilength(),
istride,
placement == rocfft_placement_inplace
&& transform_type == rocfft_transform_type_real_forward);
}
void compute_ostride()
{
ostride = compute_stride(olength(),
ostride,
placement == rocfft_placement_inplace
&& transform_type == rocfft_transform_type_real_inverse);
}
void compute_isize()
{
auto il = ilength();
size_t val = compute_ptrdiff(il, istride, nbatch, idist);
isize.resize(nibuffer());
for(int i = 0; i < isize.size(); ++i)
{
isize[i] = val + ioffset[i];
}
}
void compute_osize()
{
auto ol = olength();
size_t val = compute_ptrdiff(ol, ostride, nbatch, odist);
osize.resize(nobuffer());
for(int i = 0; i < osize.size(); ++i)
{
osize[i] = val + ooffset[i];
}
}
std::vector<size_t> ibuffer_sizes() const
{
std::vector<size_t> ibuffer_sizes;
// In-place real-to-complex transforms need to have enough space in the input buffer to
// accommodate the output, which is slightly larger.
if(placement == rocfft_placement_inplace
&& transform_type == rocfft_transform_type_real_forward)
{
return obuffer_sizes();
}
if(isize.empty())
return ibuffer_sizes;
switch(itype)
{
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
ibuffer_sizes.resize(2);
break;
default:
ibuffer_sizes.resize(1);
}
for(unsigned i = 0; i < ibuffer_sizes.size(); i++)
{
ibuffer_sizes[i] = isize[i] * var_size<size_t>(precision, itype);
}
return ibuffer_sizes;
}
std::vector<size_t> obuffer_sizes() const
{
std::vector<size_t> obuffer_sizes;
if(osize.empty())
return obuffer_sizes;
switch(otype)
{
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
obuffer_sizes.resize(2);
break;
default:
obuffer_sizes.resize(1);
}
for(unsigned i = 0; i < obuffer_sizes.size(); i++)
{
obuffer_sizes[i] = osize[i] * var_size<size_t>(precision, otype);
}
return obuffer_sizes;
}
// Compute the idist for a given transform based on the placeness, transform type, and data
// layout.
void set_idist()
{
if(idist != 0)
return;
const auto dim = length.size();
// In-place 1D transforms need extra dist.
if(transform_type == rocfft_transform_type_real_forward && dim == 1
&& placement == rocfft_placement_inplace)
{
idist = 2 * (length[0] / 2 + 1) * istride[0];
return;
}
if(transform_type == rocfft_transform_type_real_inverse && dim == 1)
{
idist = (length[0] / 2 + 1) * istride[0];
return;
}
idist = (transform_type == rocfft_transform_type_real_inverse)
? (length[dim - 1] / 2 + 1) * istride[dim - 1]
: length[dim - 1] * istride[dim - 1];
for(int i = 0; i < dim - 1; ++i)
{
idist = std::max(length[i] * istride[i], idist);
}
}
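// Illustrative examples (not part of the original header): an in-place 1D
// real-forward transform of length 8 with unit stride gets
// idist = 2 * (8 / 2 + 1) = 10, while the same transform out-of-place gets
// idist = 8.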
// Compute the odist for a given transform based on the placeness, transform type, and data
// layout. Row-major.
void set_odist()
{
if(odist != 0)
return;
const auto dim = length.size();
// In-place 1D transforms need extra dist.
if(transform_type == rocfft_transform_type_real_inverse && dim == 1
&& placement == rocfft_placement_inplace)
{
odist = 2 * (length[0] / 2 + 1) * ostride[0];
return;
}
if(transform_type == rocfft_transform_type_real_forward && dim == 1)
{
odist = (length[0] / 2 + 1) * ostride[0];
return;
}
odist = (transform_type == rocfft_transform_type_real_forward)
? (length[dim - 1] / 2 + 1) * ostride[dim - 1]
: length[dim - 1] * ostride[dim - 1];
for(int i = 0; i < dim - 1; ++i)
{
odist = std::max(length[i] * ostride[i], odist);
}
}
// Return true if the given GPU parameters would produce a valid transform.
bool valid(const int verbose) const
{
if(ioffset.size() < nibuffer() || ooffset.size() < nobuffer())
return false;
// Check that in-place transforms have the same input and output stride:
if(placement == rocfft_placement_inplace)
{
const auto stridesize = std::min(istride.size(), ostride.size());
bool samestride = true;
for(int i = 0; i < stridesize; ++i)
{
if(istride[i] != ostride[i])
samestride = false;
}
if((transform_type == rocfft_transform_type_complex_forward
|| transform_type == rocfft_transform_type_complex_inverse)
&& !samestride)
{
// In-place transforms require identical input and output strides.
if(verbose)
{
std::cout << "istride:";
for(const auto& i : istride)
std::cout << " " << i;
std::cout << " ostride0:";
for(const auto& i : ostride)
std::cout << " " << i;
std::cout << " differ; skipped for in-place transforms: skipping test"
<< std::endl;
}
return false;
}
if((transform_type == rocfft_transform_type_complex_forward
|| transform_type == rocfft_transform_type_complex_inverse)
&& (idist != odist))
{
// In-place transforms require identical distance
if(verbose)
{
std::cout << "idist:" << idist << " odist:" << odist
<< " differ; skipped for in-place transforms: skipping test"
<< std::endl;
}
return false;
}
if((transform_type == rocfft_transform_type_real_forward
|| transform_type == rocfft_transform_type_real_inverse)
&& (istride.back() != 1 || ostride.back() != 1))
{
// In-place real/complex transforms require unit strides.
if(verbose)
{
std::cout
<< "istride.back(): " << istride.back()
<< " ostride.back(): " << ostride.back()
<< " must be unitary for in-place real/complex transforms: skipping test"
<< std::endl;
}
return false;
}
if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved))
{
if(verbose)
{
std::cout << "In-place c2c transforms require identical io types; skipped.\n";
}
return false;
}
// Check offsets
switch(transform_type)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
for(int i = 0; i < nibuffer(); ++i)
{
if(ioffset[i] != ooffset[i])
return false;
}
break;
case rocfft_transform_type_real_forward:
if(ioffset[0] != 2 * ooffset[0])
return false;
break;
case rocfft_transform_type_real_inverse:
if(2 * ioffset[0] != ooffset[0])
return false;
break;
}
}
if(!check_iotypes())
return false;
// The parameters are valid.
return true;
}
// Fill in any missing parameters.
void validate()
{
set_iotypes();
compute_istride();
compute_ostride();
set_idist();
set_odist();
compute_isize();
compute_osize();
}
// Column-major getters:
std::vector<size_t> length_cm() const
{
auto length_cm = length;
std::reverse(std::begin(length_cm), std::end(length_cm));
return length_cm;
}
std::vector<size_t> istride_cm() const
{
auto istride_cm = istride;
std::reverse(std::begin(istride_cm), std::end(istride_cm));
return istride_cm;
}
std::vector<size_t> ostride_cm() const
{
auto ostride_cm = ostride;
std::reverse(std::begin(ostride_cm), std::end(ostride_cm));
return ostride_cm;
}
};
class rocfft_params : public fft_params
{
public:
rocfft_plan plan = nullptr;
rocfft_execution_info info = nullptr;
rocfft_plan_description desc = nullptr;
rocfft_params(){};
~rocfft_params()
{
if(plan != nullptr)
{
rocfft_plan_destroy(plan);
plan = nullptr;
}
if(info != nullptr)
{
rocfft_execution_info_destroy(info);
info = nullptr;
}
if(desc != nullptr)
{
rocfft_plan_description_destroy(desc);
desc = nullptr;
}
};
rocfft_status make_plan()
{
rocfft_status fft_status = rocfft_plan_description_create(&desc);
if(fft_status != rocfft_status_success)
return fft_status;
fft_status = rocfft_plan_description_set_data_layout(desc,
itype,
otype,
ioffset.data(),
ooffset.data(),
istride_cm().size(),
istride_cm().data(),
idist,
ostride_cm().size(),
ostride_cm().data(),
odist);
if(fft_status != rocfft_status_success)
return fft_status;
fft_status = rocfft_plan_create(&plan,
placement,
transform_type,
precision,
length_cm().size(),
length_cm().data(),
nbatch,
desc);
if(fft_status != rocfft_status_success)
return fft_status;
fft_status = rocfft_execution_info_create(&info);
return fft_status;
}
rocfft_status execute(void** in, void** out)
{
return rocfft_execute(plan, in, out, info);
}
};
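// Minimal usage sketch (illustrative, not part of the original header; buffer
// allocation, error checking and rocfft_setup()/rocfft_cleanup() are assumed
// to be handled by the caller, and in_buffers/out_buffers are hypothetical
// arrays of device pointers):
//
//   rocfft_params params;
//   params.length         = {256, 256};
//   params.precision      = rocfft_precision_single;
//   params.transform_type = rocfft_transform_type_complex_forward;
//   params.validate();
//   if(params.make_plan() == rocfft_status_success)
//       params.execute(in_buffers, out_buffers);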
// This is used with the program_options class so that the user can type an integer on the
// command line and we store it into an enum variable
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_array_type& atype)
{
unsigned tmp;
stream >> tmp;
atype = rocfft_array_type(tmp);
return stream;
}
// similarly for transform type
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_transform_type& ttype)
{
unsigned tmp;
stream >> tmp;
ttype = rocfft_transform_type(tmp);
return stream;
}
// count the number of total iterations for 1-, 2-, and 3-D dimensions
template <typename T1>
size_t count_iters(const T1& i)
{
return i;
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1>& i)
{
return std::get<0>(i) * std::get<1>(i);
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1, T1>& i)
{
return std::get<0>(i) * std::get<1>(i) * std::get<2>(i);
}
// Work out how many partitions to break our iteration problem into
template <typename T1>
static size_t compute_partition_count(T1 length)
{
#ifdef BUILD_CLIENTS_TESTS_OPENMP
// we seem to get contention from too many threads, which slows
// things down. particularly noticeable with mix_3D tests
static const size_t MAX_PARTITIONS = 8;
size_t iters = count_iters(length);
size_t hw_threads = std::min(MAX_PARTITIONS, static_cast<size_t>(omp_get_num_procs()));
if(!hw_threads)
return 1;
// don't bother threading problem sizes that are too small. pick
// an arbitrary number of iterations and ensure that each thread
// has at least that many iterations to process
static const size_t MIN_ITERS_PER_THREAD = 2048;
// either use the whole CPU, or use ceil(iters/iters_per_thread)
return std::min(hw_threads, (iters + MIN_ITERS_PER_THREAD + 1) / MIN_ITERS_PER_THREAD);
#else
return 1;
#endif
}
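// Illustrative example (not part of the original header): with 8 available
// hardware threads, a length of 10000 iterations gives
// min(8, (10000 + 2049) / 2048) = 5 partitions, while 100 iterations give a
// single partition.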
// Break a scalar length into some number of pieces, returning
// [(start0, end0), (start1, end1), ...]
template <typename T1>
std::vector<std::pair<T1, T1>> partition_base(const T1& length, size_t num_parts)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
// make sure we don't exceed the length
num_parts = std::min(length, num_parts);
std::vector<std::pair<T1, T1>> ret(num_parts);
auto partition_size = length / num_parts;
T1 cur_partition = 0;
for(size_t i = 0; i < num_parts; ++i, cur_partition += partition_size)
{
ret[i].first = cur_partition;
ret[i].second = cur_partition + partition_size;
}
// last partition might not divide evenly, fix it up
ret.back().second = length;
return ret;
}
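// Worked example (illustrative, not part of the original header):
// partition_base<size_t>(10, 3) returns {(0, 3), (3, 6), (6, 10)}; the last
// pair absorbs the remainder.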
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_rowmajor(const T1& length)
{
return partition_base(length, compute_partition_count(length));
}
// Partition on the leftmost part of the tuple, for row-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
partition_rowmajor(const std::tuple<T1, T1>& length)
{
auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<0>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<0>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
}
return ret;
}
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
partition_rowmajor(const std::tuple<T1, T1, T1>& length)
{
auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<0>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<2>(ret[i].first) = 0;
std::get<0>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
std::get<2>(ret[i].second) = std::get<2>(length);
}
return ret;
}
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_colmajor(const T1& length)
{
return partition_base(length, compute_partition_count(length));
}
// Partition on the rightmost part of the tuple, for col-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
partition_colmajor(const std::tuple<T1, T1>& length)
{
auto partitions = partition_base(std::get<1>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<1>(ret[i].first) = partitions[i].first;
std::get<0>(ret[i].first) = 0;
std::get<1>(ret[i].second) = partitions[i].second;
std::get<0>(ret[i].second) = std::get<0>(length);
}
return ret;
}
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
partition_colmajor(const std::tuple<T1, T1, T1>& length)
{
auto partitions = partition_base(std::get<2>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<2>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<0>(ret[i].first) = 0;
std::get<2>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
std::get<0>(ret[i].second) = std::get<0>(length);
}
return ret;
}
// Specialized computation of index given 1-, 2-, 3- dimension length + stride
template <typename T1, typename T2>
size_t compute_index(T1 length, T2 stride, size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (length * stride) + base;
}
template <typename T1, typename T2>
size_t
compute_index(const std::tuple<T1, T1>& length, const std::tuple<T2, T2>& stride, size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
+ base;
}
template <typename T1, typename T2>
size_t compute_index(const std::tuple<T1, T1, T1>& length,
const std::tuple<T2, T2, T2>& stride,
size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
+ (std::get<2>(length) * std::get<2>(stride)) + base;
}
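// Illustrative example (not part of the original header):
// compute_index(std::make_tuple(2, 3), std::make_tuple(12, 1), 5)
// returns 2 * 12 + 3 * 1 + 5 = 32.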
// Copy data of dimensions length with strides istride and distance idist between batches to
// a buffer with strides ostride and distance odist between batches. The input and output
// types are identical.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to1(const Tval* input,
Tval* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]] = input[idx + ioffset[0]];
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and distance idist between batches to
// a buffer with strides ostride and distance odist between batches. The input type is
// planar and the output type is complex interleaved.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_2to1(const Tval* input0,
const Tval* input1,
std::complex<Tval>* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]]
= std::complex<Tval>(input0[idx + ioffset[0]], input1[idx + ioffset[1]]);
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and distance idist between batches to
// a buffer with strides ostride and distance odist between batches. The input type is
// complex interleaved and the output type is planar.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to2(const std::complex<Tval>* input,
Tval* output0,
Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output0[odx + ooffset[0]] = input[idx + ioffset[0]].real();
output1[odx + ooffset[1]] = input[idx + ioffset[0]].imag();
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and distance idist between batches to
// a buffer with strides ostride and distance odist between batches. The input type is given
// by itype, and the output type is given by otype.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const Tint2& istride,
const size_t idist,
const rocfft_array_type otype,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
if(itype == otype)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
for(int idx = 0; idx < input.size(); ++idx)
{
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
break;
default:
throw std::runtime_error("Invalid data type");
break;
}
}
else if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_hermitian_interleaved
&& otype == rocfft_array_type_hermitian_planar))
{
// copy 1to2
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<float*>(output[0].data()),
reinterpret_cast<float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<double*>(output[0].data()),
reinterpret_cast<double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else if((itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved)
|| (itype == rocfft_array_type_hermitian_planar
&& otype == rocfft_array_type_hermitian_interleaved))
{
// copy 2 to 1
switch(precision)
{
case rocfft_precision_single:
copy_buffers_2to1(reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_2to1(reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
}
// unroll arbitrary-dimension copy_buffers into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const rocfft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return copy_buffers(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
ioffset,
ooffset);
case 2:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
ioffset,
ooffset);
case 3:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
ioffset,
ooffset);
default:
abort();
}
}
// Compute the L-infinity and L-2 distance between two buffers: one with strides istride
// and distance idist between batches, the other with strides ostride and distance odist
// between batches. Both buffers are of complex type.
struct VectorNorms
{
double l_2 = 0.0, l_inf = 0.0;
};
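// Worked example (illustrative only): comparing {1+2i, 3+4i} against
// {1+2i, 3+4.5i} gives l_inf = 0.5 (largest per-component difference) and
// l_2 = sqrt(0^2 + 0^2 + 0^2 + 0.5^2) = 0.5, since the real and imaginary
// parts of every element each contribute to both norms.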
template <typename Tcomplex, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_complex(const Tcomplex* input,
const Tcomplex* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_colmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double rdiff
= std::abs(output[odx + ooffset[0]].real() - input[idx + ioffset[0]].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
const double idiff
= std::abs(output[odx + ooffset[0]].imag() - input[idx + ioffset[0]].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers: one with strides istride
// and distance idist between batches, the other with strides ostride and distance odist
// between batches. Both buffers are of real type.
template <typename Tfloat, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_real(const Tfloat* input,
const Tfloat* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double diff = std::abs(output[odx + ooffset[0]] - input[idx + ioffset[0]]);
cur_linf = std::max(diff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += diff * diff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers: one with strides istride
// and distance idist between batches, the other with strides ostride and distance odist
// between batches. input is complex-interleaved, output is complex-planar.
template <typename Tval, typename Tint1, typename T2, typename T3>
inline VectorNorms distance_1to2(const std::complex<Tval>* input,
const Tval* output0,
const Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const T3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double rdiff
= std::abs(output0[odx + ooffset[0]] - input[idx + ioffset[0]].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
const double idiff
= std::abs(output1[odx + ooffset[1]] - input[idx + ioffset[0]].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers of dimension length and
// with types given by itype, otype, and precision.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const Tint2& istride,
const size_t idist,
const rocfft_array_type otype,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
VectorNorms dist;
if(itype == otype)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
dist = distance_1to1_complex(
reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<const std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
dist = distance_1to1_complex(
reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<const std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
for(int idx = 0; idx < input.size(); ++idx)
{
VectorNorms d;
switch(precision)
{
case rocfft_precision_single:
d = distance_1to1_real(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<const float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
d = distance_1to1_real(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<const double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_inf = std::max(d.l_inf, dist.l_inf);
dist.l_2 += d.l_2 * d.l_2;
}
break;
default:
throw std::runtime_error("Invalid input and output types.");
break;
}
}
else if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_hermitian_interleaved
&& otype == rocfft_array_type_hermitian_planar))
{
switch(precision)
{
case rocfft_precision_single:
dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<const float*>(output[0].data()),
reinterpret_cast<const float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<const double*>(output[0].data()),
reinterpret_cast<const double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
}
else if((itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved)
|| (itype == rocfft_array_type_hermitian_planar
&& otype == rocfft_array_type_hermitian_interleaved))
{
switch(precision)
{
case rocfft_precision_single:
dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(output[0].data()),
reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
length,
nbatch,
ostride,
odist,
istride,
idist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(output[0].data()),
reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
length,
nbatch,
ostride,
odist,
istride,
idist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
dist.l_2 = sqrt(dist.l_2);
return dist;
}
// Unroll arbitrary-dimension distance into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const rocfft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return distance(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
case 2:
return distance(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
case 3:
return distance(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
default:
abort();
}
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// distance idist between batches. Data is std::complex.
template <typename Tcomplex, typename T1, typename T2>
inline VectorNorms norm_complex(const Tcomplex* input,
const T1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
double linf = 0.0;
double l2 = 0.0;
size_t idx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const double rval = std::abs(input[idx + offset[0]].real());
cur_linf = std::max(rval, cur_linf);
cur_l2 += rval * rval;
const double ival = std::abs(input[idx + offset[0]].imag());
cur_linf = std::max(ival, cur_linf);
cur_l2 += ival * ival;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// distance idist between batches. Data is real-valued.
template <typename Tfloat, typename T1, typename T2>
inline VectorNorms norm_real(const Tfloat* input,
const T1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
double linf = 0.0;
double l2 = 0.0;
size_t idx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const double val = std::abs(input[idx + offset[0]]);
cur_linf = std::max(val, cur_linf);
cur_l2 += val * val;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// distance idist between batches. Data format is given by precision and itype.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
const T1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
VectorNorms norm;
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
norm = norm_complex(reinterpret_cast<const std::complex<float>*>(input[0].data()),
length,
nbatch,
istride,
idist,
offset);
break;
case rocfft_precision_double:
norm = norm_complex(reinterpret_cast<const std::complex<double>*>(input[0].data()),
length,
nbatch,
istride,
idist,
offset);
break;
}
norm.l_2 *= norm.l_2;
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
for(int idx = 0; idx < input.size(); ++idx)
{
VectorNorms n;
switch(precision)
{
case rocfft_precision_single:
n = norm_real(reinterpret_cast<const float*>(input[idx].data()),
length,
nbatch,
istride,
idist,
offset);
break;
case rocfft_precision_double:
n = norm_real(reinterpret_cast<const double*>(input[idx].data()),
length,
nbatch,
istride,
idist,
offset);
break;
}
norm.l_inf = std::max(n.l_inf, norm.l_inf);
norm.l_2 += n.l_2 * n.l_2;
}
break;
default:
throw std::runtime_error("Invalid data type");
break;
}
norm.l_2 = sqrt(norm.l_2);
return norm;
}
// Unroll arbitrary-dimension norm into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<T1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type type,
const std::vector<T2>& stride,
const size_t dist,
const std::vector<size_t>& offset)
{
switch(length.size())
{
case 1:
return norm(input, length[0], nbatch, precision, type, stride[0], dist, offset);
case 2:
return norm(input,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
type,
std::make_tuple(stride[0], stride[1]),
dist,
offset);
case 3:
return norm(input,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
type,
std::make_tuple(stride[0], stride[1], stride[2]),
dist,
offset);
default:
abort();
}
}
// Given a buffer of complex values stored in a vector of chars (or two vectors in the
// case of planar format), impose Hermitian symmetry.
// NB: length is the dimensions of the FFT, not the data layout dimensions.
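// Illustrative note (added for clarity): for a 1-D transform of even length N
// in Hermitian (half-spectrum) storage only elements 0 .. N/2 are kept, so the
// constraints reduce to data[0].imag() == 0 and data[N/2].imag() == 0, which
// is what the 1-D case below enforces. The 2-D and 3-D cases additionally
// enforce data[L - i] == conj(data[i]) along the full-length axes and repeat
// the rule on the Nyquist rows/planes of the other axes.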
template <typename Tfloat, typename Tallocator, typename Tsize>
inline void impose_hermitian_symmetry(std::vector<std::vector<char, Tallocator>>& vals,
const std::vector<Tsize>& length,
const std::vector<Tsize>& istride,
const Tsize idist,
const Tsize nbatch)
{
switch(vals.size())
{
case 1:
{
// Complex interleaved data
for(auto ibatch = 0; ibatch < nbatch; ++ibatch)
{
auto data = ((std::complex<Tfloat>*)vals[0].data()) + ibatch * idist;
switch(length.size())
{
case 3:
if(length[2] % 2 == 0)
{
data[istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[2] % 2 == 0)
{
data[istride[0] * (length[0] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[1] % 2 == 0 && length[2] % 2 == 0)
{
data[istride[1] * (length[1] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[1] % 2 == 0 && length[2] % 2 == 0)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)
+ istride[2] * (length[2] / 2)]
.imag(0.0);
// clang format on
}
// y-axis:
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
data[istride[1] * (length[1] - j)] = std::conj(data[istride[1] * j]);
}
if(length[0] % 2 == 0)
{
// y-axis at x-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)]
= std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j]);
// clang format on
}
}
// x-axis:
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
}
if(length[1] % 2 == 0)
{
// x-axis at y-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
// clang format on
}
}
// x-y plane:
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
for(auto j = 1; j < length[1]; ++j)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)]
= std::conj(data[istride[0] * i + istride[1] * j]);
// clang format on
}
}
if(length[2] % 2 == 0)
{
// x-axis at z-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[2] * (length[2] / 2)]);
}
if(length[1] % 2 == 0)
{
// x-axis at yz-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
                    data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)
                         + istride[2] * (length[2] / 2)]
                        = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)
                                         + istride[2] * (length[2] / 2)]);
}
}
// y-axis: at z-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
data[istride[1] * (length[1] - j) + istride[2] * (length[2] / 2)]
= std::conj(data[istride[1] * j + istride[2] * (length[2] / 2)]);
}
if(length[0] % 2 == 0)
{
// y-axis: at xz-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j
+ istride[2] * (length[2] / 2)]);
// clang format on
}
}
// x-y plane: at z-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
for(auto j = 1; j < length[1]; ++j)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[1] * j
+ istride[2] * (length[2] / 2)]);
// clang format on
}
}
}
// fall-through
case 2:
if(length[1] % 2 == 0)
{
data[istride[1] * (length[1] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[1] % 2 == 0)
{
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)].imag(0.0);
}
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
}
if(length[1] % 2 == 0)
{
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
}
}
// fall-through
case 1:
data[0].imag(0.0);
if(length[0] % 2 == 0)
{
data[istride[0] * (length[0] / 2)].imag(0.0);
}
break;
default:
throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
break;
}
}
break;
}
case 2:
{
// Complex planar data
for(auto ibatch = 0; ibatch < nbatch; ++ibatch)
{
auto idata = ((Tfloat*)vals[1].data()) + ibatch * idist;
switch(length.size())
{
case 3:
throw std::runtime_error("Not implemented");
// FIXME: implement
case 2:
throw std::runtime_error("Not implemented");
// FIXME: implement
case 1:
idata[0] = 0.0;
if(length[0] % 2 == 0)
{
idata[istride[0] * (length[0] / 2)] = 0.0;
}
break;
default:
throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
break;
}
}
break;
}
default:
throw std::runtime_error("Invalid data type");
break;
}
}
// Given an array type and transform length, strides, etc., load random floats in [0,1]
// into the input array of floats/doubles or complex floats/doubles, which is stored in a
// vector of chars (or two vectors in the case of planar format).
// lengths are the memory lengths (i.e. not the transform parameters)
template <typename Tfloat, typename Tallocator, typename Tint1>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
const rocfft_array_type itype,
const Tint1& whole_length,
const Tint1& istride,
const size_t idist,
const size_t nbatch)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
{
auto idata = (std::complex<Tfloat>*)input[0].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(auto b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const auto i = compute_index(index, istride, i_base);
const Tfloat x = (Tfloat)gen() / (Tfloat)gen.max();
const Tfloat y = (Tfloat)gen() / (Tfloat)gen.max();
const std::complex<Tfloat> val(x, y);
idata[i] = val;
} while(increment_rowmajor(index, length));
}
}
break;
}
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
{
auto ireal = (Tfloat*)input[0].data();
auto iimag = (Tfloat*)input[1].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(auto b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const auto i = compute_index(index, istride, i_base);
const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(),
(Tfloat)gen() / (Tfloat)gen.max());
ireal[i] = val.real();
iimag[i] = val.imag();
} while(increment_rowmajor(index, length));
}
}
break;
}
case rocfft_array_type_real:
{
auto idata = (Tfloat*)input[0].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(auto b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const auto i = compute_index(index, istride, i_base);
const Tfloat val = (Tfloat)gen() / (Tfloat)gen.max();
idata[i] = val;
} while(increment_rowmajor(index, length));
}
}
break;
}
default:
throw std::runtime_error("Input layout format not yet supported");
break;
}
}
// unroll set_input for dimension 1, 2, 3
template <typename Tfloat, typename Tallocator>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
const rocfft_array_type itype,
const std::vector<size_t>& length,
const std::vector<size_t>& istride,
const size_t idist,
const size_t nbatch)
{
switch(length.size())
{
case 1:
set_input<Tfloat>(input, itype, length[0], istride[0], idist, nbatch);
break;
case 2:
set_input<Tfloat>(input,
itype,
std::make_tuple(length[0], length[1]),
std::make_tuple(istride[0], istride[1]),
idist,
nbatch);
break;
case 3:
set_input<Tfloat>(input,
itype,
std::make_tuple(length[0], length[1], length[2]),
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
nbatch);
break;
default:
abort();
}
}
// Given a data type and precision and the required size (in elements) of each
// component buffer, allocate the host buffer(s).
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> allocate_host_buffer(
const rocfft_precision precision, const rocfft_array_type type, const std::vector<size_t>& size)
{
std::vector<std::vector<char, Allocator>> buffers(size.size());
for(int i = 0; i < size.size(); ++i)
{
buffers[i].resize(size[i] * var_size<size_t>(precision, type));
}
return buffers;
}
// Given a data type and dimensions, fill the buffer, imposing Hermitian symmetry if
// necessary.
// NB: length is the logical size of the FFT, and not necessarily the data dimensions
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> compute_input(const fft_params& params)
{
auto input = allocate_host_buffer<Allocator>(params.precision, params.itype, params.isize);
for(auto& i : input)
{
std::fill(i.begin(), i.end(), 0.0);
}
switch(params.precision)
{
case rocfft_precision_double:
set_input<double>(
input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
break;
case rocfft_precision_single:
set_input<float>(
input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
break;
}
if(params.itype == rocfft_array_type_hermitian_interleaved
|| params.itype == rocfft_array_type_hermitian_planar)
{
switch(params.precision)
{
case rocfft_precision_double:
impose_hermitian_symmetry<double>(
input, params.length, params.istride, params.idist, params.nbatch);
break;
case rocfft_precision_single:
impose_hermitian_symmetry<float>(
input, params.length, params.istride, params.idist, params.nbatch);
break;
}
}
return input;
}
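// Illustrative usage sketch (added for clarity; not part of the original
// harness). The olength()/ostride/odist/otype members are assumed to mirror
// the input-side fields of fft_params used above:
//
//   fft_params params; // lengths, strides, types, precision, nbatch, ...
//   auto input = compute_input(params); // random data, Hermitian if needed
//   auto in_norm = norm(input, params.ilength(), params.nbatch,
//                       params.precision, params.itype, params.istride,
//                       params.idist, {0});
//   // ... run the reference and the library transform, collect 'ref' and
//   // 'out' host buffers ...
//   std::vector<std::pair<size_t, size_t>> linf_failures;
//   auto diff = distance(ref, out, params.olength(), params.nbatch,
//                        params.precision, params.otype, params.ostride,
//                        params.odist, params.otype, params.ostride,
//                        params.odist, linf_failures, 1e-6, {0}, {0});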
#endif
|
estimate_thetae.c
|
/******************************************************************************
* *
* ESTIMATE_THETAE.C *
* *
* ESTIMATE THETAE AT END OF TIMESTEP DUE TO RADIATION SOURCES *
* *
******************************************************************************/
#include "decs.h"
#if RADIATION
#if ESTIMATE_THETAE
#include <gsl/gsl_errno.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_roots.h>
#define NTHREADS (24)
// ESTIMATE THETAE BEFORE PUSH TO AVOID ISSUES WITH MPI COMMUNICATION
double Thetae_est[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG];
double Thetae_old[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG];
double Ucon[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NDIM];
double Ucov[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NDIM];
double Bcov[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NDIM];
double Ne[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG];
double Bmag[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG];
int Nsph_zone[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NTHREADS];
int Nsph_cntd[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NTHREADS];
double * w[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NTHREADS];
double * nu[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NTHREADS];
double * dlam[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NTHREADS];
double * theta[N1 + 2 * NG][N2 + 2 * NG][N3 + 2 * NG][NTHREADS];
static int type = 0; // TODO: remove me when type loops are in place.
static int interaction = 0; // TODO: remove me when type loops are in place.
double get_Thetae_est(int i, int j, int k) { return Thetae_est[i][j][k]; }
// Rootfinding parameters and function
struct of_params {
int i, j, k;
double rho, Ne, Bmag, Thetaei, Ucon0, dt;
double extra[EOS_NUM_EXTRA];
};
double dEdt(double Thetae, void *params) {
struct of_params *p = (struct of_params *)params;
int i = p->i;
int j = p->j;
int k = p->k;
double rho = p->rho;
double dt = p->dt;
double * extra = p->extra;
// TODO: encapsulate electrons in EOS framework.
double uei, uef;
#if ELECTRONS && EOS == EOS_TYPE_GAMMA
uei = Ne[i][j][k] * Thetae_old[i][j][k] * ME * CL * CL / (game - 1.);
uef = Ne[i][j][k] * Thetae * ME * CL * CL / (game - 1.);
#else
uei = EOS_u_N_Theta(rho, Ne[i][j][k], Thetae_old[i][j][k], extra);
uef = EOS_u_N_Theta(rho, Ne[i][j][k], Thetae, extra);
#endif
struct of_microphysics micro;
micro.Thetae = Thetae;
micro.Ne = Ne[i][j][k];
micro.B = Bmag[i][j][k];
double J = get_J(&micro);
double vol = ggeom[i][j][CENT].g * dx[1] * dx[2] * dx[3] * dt;
// Loop over superphotons
double udotG_abs = 0.;
double udotG_scatt = 0.;
for (int n = 0; n < nthreads; n++) {
for (int m = 0; m < Nsph_zone[i][j][k][n]; m++) {
double udotk = HPL * nu[i][j][k][n][m] / (ME * CL * CL);
// Absorption
double alpha_inv_a =
alpha_inv_abs(nu[i][j][k][n][m], type, &micro, theta[i][j][k][n][m]);
double dtau_a =
alpha_inv_a * L_unit * HPL / (ME * CL * CL) * dlam[i][j][k][n][m];
double dw_a = w[i][j][k][n][m] * (1. - exp(-dtau_a));
udotG_abs += kphys_to_num * dw_a * udotk / vol;
// dEtau_abs += HPL*nu[i][j][k][n][m]*w[i][j][k][n][m]*(1. -
// exp(-dtau_a));
// Scattering (assuming h \nu << k_B T_e)
double alpha_inv_s =
alpha_inv_scatt(nu[i][j][k][n][m], type, interaction, &micro);
double dtau_s =
alpha_inv_s * L_unit * HPL / (ME * CL * CL) * dlam[i][j][k][n][m];
double dw_s = w[i][j][k][n][m] * (1. - exp(-dtau_s));
double amp =
1. + 4. * Thetae - 2. * pow(Thetae, 3. / 2.) + 16. * pow(Thetae, 2.);
udotG_scatt += kphys_to_num * (1. - amp) * dw_s * udotk / vol;
// dEdtau_scatt -= HPL*nu[i][j][k][n][m]*;
}
}
// dEdtau_abs *= Ucon[i][j][k][0]/(/* d3xi! */dt*T_unit);
// dEdtau_scatt *= Ucon[i][j][k][0]/(dt*T_unit);
udotG_abs *= U_unit / T_unit;
udotG_scatt *= U_unit / T_unit;
// if (udotG_abs != 0. || udotG_scatt != 0.) {
/*if (i == 80 && j == 64) {
printf("Thetae Ne Bmag = %e %e %e\n", Thetae, Ne[i][j][k], Bmag[i][j][k]);
printf("%e %e %e %e\n", Ucon[i][j][k][0]*(uef - uei)/dt,
J, udotG_abs, udotG_scatt);
}*/
// Solve entropy equation in cgs:
// d u_e / d \tau = -emission + absorption - upscattering
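// As discretized below (added note), the residual is
//   resid = Ucon^0 * (uef - uei) / (dt * T_unit) + J + udotG_abs + udotG_scatt,
// and estimate_Thetae() brackets this residual in Thetae and drives it to
// zero with a GSL Brent root solve.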
double resid = Ucon[i][j][k][0] * (uef - uei) / (dt * T_unit);
resid += J;
resid += udotG_abs;
resid += udotG_scatt;
return resid;
}
void estimate_Thetae(
grid_prim_type P, grid_eosvar_type extra, double t, double dt) {
if (NTHREADS != nthreads) {
fprintf(stderr, "NTHREADS = %i nthreads = %i! Exiting...\n", NTHREADS,
nthreads);
exit(-1);
}
struct of_microphysics micro;
// Count superphotons in each zone
#pragma omp parallel
{
int n = omp_get_thread_num();
struct of_photon *ph = photon_lists[n];
while (ph != NULL) {
int i, j, k;
double X[NDIM], Kcov[NDIM], Kcon[NDIM];
get_X_K_interp(ph, t, P, X, Kcov, Kcon);
Xtoijk(X, &i, &j, &k);
Nsph_zone[i][j][k][n]++;
ph = ph->next;
}
} // omp parallel
// malloc required memory and store ucon for convenience
#pragma omp parallel for collapse(3)
ZLOOP {
double Bcon[NDIM];
get_fluid_zone(i, j, k, P, extra, &micro, Ucon[i][j][k], Ucov[i][j][k],
Bcon, Bcov[i][j][k]);
Ne[i][j][k] = micro.Ne;
Thetae_old[i][j][k] = micro.Thetae;
Bmag[i][j][k] = micro.B;
for (int n = 0; n < nthreads; n++) {
w[i][j][k][n] = safe_malloc(Nsph_zone[i][j][k][n] * sizeof(double));
nu[i][j][k][n] = safe_malloc(Nsph_zone[i][j][k][n] * sizeof(double));
dlam[i][j][k][n] = safe_malloc(Nsph_zone[i][j][k][n] * sizeof(double));
theta[i][j][k][n] = safe_malloc(Nsph_zone[i][j][k][n] * sizeof(double));
Nsph_cntd[i][j][k][n] = 0;
}
} // omp parallel
// Create per-zone lists of w, nu, dlam
#pragma omp parallel
{
int n = omp_get_thread_num();
struct of_photon *ph = photon_lists[n];
while (ph != NULL) {
int i, j, k;
double X[NDIM], Kcov[NDIM], Kcon[NDIM];
get_X_K_interp(ph, t, P, X, Kcov, Kcon);
Xtoijk(X, &i, &j, &k);
if (i < NG || i > NG + N1 - 1 || j < NG || j > NG + N2 - 1 || k < NG ||
k > NG + N3 - 1) {
printf("BAD PH????\n");
printf("[%i] %i %i %i X[] = %e %e %e %e\n", n, i, j, k, X[0], X[1],
X[2], X[3]);
for (int mu = 0; mu < 3; mu++) {
printf("X[%i][] = %e %e %e %e\n", mu, ph->X[mu][0], ph->X[mu][1],
ph->X[mu][2], ph->X[mu][3]);
printf("Kcov[%i][] = %e %e %e %e\n", mu, ph->Kcov[mu][0],
ph->Kcov[mu][1], ph->Kcov[mu][2], ph->Kcov[mu][3]);
printf("Kcon[%i][] = %e %e %e %e\n", mu, ph->Kcon[mu][0],
ph->Kcon[mu][1], ph->Kcon[mu][2], ph->Kcon[mu][3]);
}
printf("origin %i %i %i %i\n", ph->origin[0], ph->origin[1],
ph->origin[2], ph->origin[3]);
printf("nscatt = %i\n", ph->nscatt);
exit(-1);
}
double freq = 0.;
for (int mu = 0; mu < NDIM; mu++) {
freq -= Ucon[i][j][k][mu] * Kcov[mu];
}
freq *= ME * CL * CL / HPL;
w[i][j][k][n][Nsph_cntd[i][j][k][n]] = ph->w;
nu[i][j][k][n][Nsph_cntd[i][j][k][n]] = freq;
dlam[i][j][k][n][Nsph_cntd[i][j][k][n]] = dt / Kcon[0];
theta[i][j][k][n][Nsph_cntd[i][j][k][n]] =
get_bk_angle(X, Kcon, Ucov[i][j][k], Bcov[i][j][k], Bmag[i][j][k]);
Nsph_cntd[i][j][k][n]++;
ph = ph->next;
}
} // omp parallel
// In each zone, rootfind to Thetae that matches sources. If failure, simply
// return current Thetae
#pragma omp parallel
{
const gsl_root_fsolver_type *T;
gsl_root_fsolver * s;
gsl_function F;
F.function = &dEdt;
T = gsl_root_fsolver_brent;
s = gsl_root_fsolver_alloc(T);
static double extra[EOS_NUM_EXTRA];
#pragma omp threadprivate(extra)
#if EOS == EOS_TYPE_GAMMA
EOS_ELOOP { extra[e] = 0.0; }
#endif
#pragma omp for collapse(3)
ZLOOP {
#if EOS == EOS_TYPE_TABLE
extra[EOS_YE] = P[i][j][k][YE];
#endif
// fill parameters
struct of_params params;
params.i = i;
params.j = j;
params.k = k;
params.rho = P[i][j][k][RHO];
params.Ne = Ne[i][j][k];
params.Bmag = Bmag[i][j][k];
params.Thetaei = Thetae_old[i][j][k];
params.Ucon0 = Ucon[i][j][k][0];
params.dt = dt;
// hopefully this doesn't break openmp
EOS_ELOOP { params.extra[e] = extra[e]; }
F.params = &params;
double r = 0.;
double Thetae_lo = 0.5 * Thetae_old[i][j][k];
double Thetae_hi = 2. * Thetae_old[i][j][k];
// Test interval for sanity
double rmin = dEdt(Thetae_lo, &params);
double rmax = dEdt(Thetae_hi, &params);
if (rmin * rmax > 0.) {
fprintf(stderr, "[%i %i %i] Root not bracketed!\n", i, j, k);
Thetae_est[i][j][k] = Thetae_old[i][j][k];
continue;
}
gsl_root_fsolver_set(s, &F, Thetae_lo, Thetae_hi);
int iter = 0, max_iter = 100;
int status;
do {
  iter++;
  status = gsl_root_fsolver_iterate(s);
  r = gsl_root_fsolver_root(s);
  Thetae_lo = gsl_root_fsolver_x_lower(s);
  Thetae_hi = gsl_root_fsolver_x_upper(s);
  status = gsl_root_test_interval(Thetae_lo, Thetae_hi, 0, 0.001);
} while (status == GSL_CONTINUE && iter < max_iter);
if (status != GSL_SUCCESS) {
Thetae_est[i][j][k] = Thetae_old[i][j][k];
} else {
Thetae_est[i][j][k] = r;
}
}
gsl_root_fsolver_free(s);
} // omp parallel
// Clean up mallocs and reset counters
#pragma omp parallel for collapse(3)
ZLOOP {
for (int n = 0; n < nthreads; n++) {
free(w[i][j][k][n]);
free(nu[i][j][k][n]);
free(dlam[i][j][k][n]);
free(theta[i][j][k][n]);
Nsph_zone[i][j][k][n] = 0;
}
} // omp parallel
}
#endif // ESTIMATE_THETAE
#endif // RADIATION
|
GB_binop__minus_int8.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int8)
// A*D function (colscale): GB (_AxD__minus_int8)
// D*A function (rowscale): GB (_DxB__minus_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int8)
// C=scalar+B GB (_bind1st__minus_int8)
// C=scalar+B' GB (_bind1st_tran__minus_int8)
// C=A+scalar GB (_bind2nd__minus_int8)
// C=A'+scalar GB (_bind2nd_tran__minus_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT8 || GxB_NO_MINUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__minus_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__minus_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__minus_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__minus_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__minus_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__minus_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB (_bind1st_tran__minus_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB (_bind2nd_tran__minus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hsaxpy.c
|
/**
* @file hsaxpy.c
* @brief Function definition for performing the \c saxpy operation on host.
*
* This source file contains function definition for the \c saxpy operation,
* which is defined as:
*
* y := a * x + y
*
* where:
*
* - a is a scalar.
* - x and y are single-precision vectors each with n elements.
*
* @author Xin Wu (PC²)
* @date 05.04.2020
* @copyright CC BY-SA 2.0
*/
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "wtcalc.h"
#include "hsaxpy.h"
void hsaxpy(const int n,
const float a,
const float *x,
float *y)
{
struct timespec rt[2];
/*
* - naive implementation
*/
clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp parallel for simd schedule(simd:static) \
default(none) shared(a, n, x, y)
for (int i = 0; i < n; i++) {
y[i] = a * x[i] + y[i];
}
clock_gettime(CLOCK_REALTIME, rt + 1);
if (wtcalc >= 0.0) {
wtcalc += (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
}
}
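/*
 * Minimal usage sketch (illustrative only, not part of the original file).
 * It assumes a plain C translation unit linked against hsaxpy.c and wtcalc.c
 * (which defines the wtcalc accumulator); the length and values are arbitrary:
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include "hsaxpy.h"
 *
 *   int main(void)
 *   {
 *     const int n = 1 << 20;
 *     const float a = 2.0f;
 *     float *x = malloc(n * sizeof(float));
 *     float *y = malloc(n * sizeof(float));
 *     for (int i = 0; i < n; i++) { x[i] = 1.0f; y[i] = 2.0f; }
 *     hsaxpy(n, a, x, y);          // y[i] == 4.0f afterwards
 *     printf("y[0] = %f\n", y[0]);
 *     free(x);
 *     free(y);
 *     return 0;
 *   }
 */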
|