parallel-reduction.c
|
/*
* parallel-reduction.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
int var = 0;
// Number of threads is empirical: We need enough threads so that
// the reduction is really performed hierarchically in the barrier!
#pragma omp parallel num_threads(5) reduction(+ : var)
{ var = 1; }
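// Each of the 5 threads sets its private copy of var to 1; the reduction
// then sums those copies into var, so var == 5 after the parallel region.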
fprintf(stderr, "DONE\n");
int error = (var != 5);
return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
LAGraph_BF_full1a.c
|
//------------------------------------------------------------------------------
// LAGraph_BF_full1a.c: Bellman-Ford single-source shortest paths, returns the
// tree; the diagonal of the input matrix A need not be explicitly zero
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact [email protected] for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_BF_full1a: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. Contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full1a performs Bellman-Ford to find the shortest paths, the
// parent nodes along each path, and the hops (number of edges) in each path,
// from a given source vertex s in the range [0, n), on a graph given as an
// n-by-n matrix A. The sparse matrix A has an entry A(i, j) = w if there is an
// edge from vertex i to vertex j with weight w.
// TODO: think about the return values
// LAGraph_BF_full1a returns GrB_SUCCESS if it succeeds. In this case, there
// are no negative-weight cycles in the graph, and d, pi, and h are returned.
// The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1,
// where p is the parent node of the k-th node in the shortest path. In
// particular, pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in
// the shortest path.
// If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the
// GrB_Vectors d, pi, and h (i.e., *pd_output, *ppi_output, and *ph_output,
// respectively) are all returned as NULL.
// Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and
// so on, can be returned if they are detected by the underlying GrB_*
// functions.
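// Illustrative example (annotation, not from the original source): for the
// 3-node graph with edges A(0,1)=2, A(0,2)=5, A(1,2)=1 and source s = 0:
//    d  = [0, 2, 3]    shortest distances from s
//    pi = [0, 1, 2]    parent+1 along each path; pi(s) = 0 marks "no parent"
//    h  = [0, 1, 2]    number of edges on each shortest path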
//------------------------------------------------------------------------------
#include "LAGraph_internal.h"
#define LAGRAPH_FREE_WORK \
{ \
GrB_free(&d); \
GrB_free(&dmasked); \
GrB_free(&dless); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_LT_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGRAPH_FREE (I); \
LAGRAPH_FREE (J); \
LAGRAPH_FREE (w); \
LAGRAPH_FREE (W); \
LAGRAPH_FREE (h); \
LAGRAPH_FREE (pi); \
}
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE_WORK \
GrB_free (pd_output); \
GrB_free (ppi_output); \
GrB_free (ph_output); \
}
//------------------------------------------------------------------------------
// data type for each entry of the adjacency matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to the nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
void BF_lMIN3
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
if (z != x) { *z = *x; }
}
else
{
*z = *y;
}
}
void BF_PLUSrhs3
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
z->w = x->w + y->w;
z->h = x->h + y->h;
if (x->pi != UINT64_MAX && y->pi != 0)
{
z->pi = y->pi;
}
else
{
z->pi = x->pi;
}
}
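// Illustrative sketch (annotation): extending a path <w=2, h=1, pi=1> by an
// edge <w=3, h=1, pi=4> under BF_PLUSrhs3 gives <w=5, h=2, pi=4>: weights and
// hops add, and the penultimate vertex is taken from the right-hand operand,
// unless the left path does not exist (pi == UINT64_MAX) or the right operand
// carries no vertex information (pi == 0).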
void BF_LT3
(
bool *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
*z = true;
}
else
{
*z = false;
}
}
// Given an n-by-n adjacency matrix A and a source vertex s:
// if there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and the parents along those paths as vector d.
// Otherwise, return d=NULL, since there is a negative-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has weights on corresponding entries of edges
// s is given index for source vertex
GrB_Info LAGraph_BF_full1a
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dmasked = NULL, dless = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_BinaryOp BF_LT_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF_Tuple3_struct *W = NULL;
if (pd_output != NULL) *pd_output = NULL;
if (ppi_output != NULL) *ppi_output = NULL;
if (ph_output != NULL) *ph_output = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
LAGr_Matrix_nrows (&nrows, A) ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nvals (&nz, A);
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
if (s >= n) // GrB_Index is unsigned, so s < 0 is impossible
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGr_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct));
// GrB_BinaryOp
LAGr_BinaryOp_new(&BF_LT_Tuple3,
(LAGraph_binary_function) (&BF_LT3), GrB_BOOL, BF_Tuple3, BF_Tuple3);
LAGr_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF_lMIN3), BF_Tuple3, BF_Tuple3,BF_Tuple3);
LAGr_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF_PLUSrhs3),
BF_Tuple3, BF_Tuple3, BF_Tuple3);
// GrB_Monoid
BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
LAGr_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3);
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
#if 1
I = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_malloc (nz, sizeof(double)) ;
W = LAGraph_malloc (nz, sizeof(BF_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, but with entries of type BF_Tuple3
//--------------------------------------------------------------------------
LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads = LAGraph_get_nthreads ( ) ;
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
LAGr_Matrix_new(&Atmp, BF_Tuple3, n, n);
LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
LAGRAPH_FREE (I);
LAGRAPH_FREE (J);
LAGRAPH_FREE (W);
LAGRAPH_FREE (w);
#else
TODO: GraphBLAS could use a new kind of unary operator, not z=f(x), but
[z,flag] = f (aij, i, j, k, nrows, ncols, nvals, etc, ...)
flag: keep or discard. Combines GrB_apply and GxB_select.
builtins:
f(...) =
i, bool is true
j, bool is true
i+j*nrows, etc.
k
tril, triu (like GxB_select): return aij, and true/false boolean
z=f(x,i). x: double, z:tuple3, i:GrB_Index with the row index of x
// z = (BF_Tuple3_struct) { .w = x, .h = 1, .pi = i + 1 };
GrB_apply (Atmp, op, A, ...)
in the BFS, this is used:
op: z = f ( .... ) = i
to replace x(i) with i
#endif
//--------------------------------------------------------------------------
// create and initialize "distance" vector d, dmasked and dless
//--------------------------------------------------------------------------
LAGr_Vector_new(&d, BF_Tuple3, n);
// make d dense
LAGRAPH_OK(GrB_Vector_assign_UDT(d, NULL, NULL, (void*)&BF_identity,
GrB_ALL, n, NULL));
// initial distance from s to itself
BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s));
// create dmasked as a sparse vector with only one entry at s
LAGr_Vector_new(&dmasked, BF_Tuple3, n);
LAGRAPH_OK(GrB_Vector_setElement_UDT(dmasked, &d0, s));
// create dless
LAGr_Vector_new(&dless, GrB_BOOL, n);
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
bool any_dless = true; // if there is any newly found shortest path
int64_t iter = 0; // number of iterations
// terminate when no new path is found or more than V-1 loops
while (any_dless && iter < n - 1)
{
// execute semiring on dmasked and A, and save the result to dmasked
LAGr_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL);
// dless = d .< dtmp
LAGr_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d,
NULL);
// if there is no entry with smaller distance then all shortest paths
// are found
LAGr_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless,
NULL) ;
if(any_dless)
{
// update all entries with smaller distances
//LAGr_apply(d, dless, NULL, BF_Identity_Tuple3,
// dmasked, NULL));
LAGr_assign(d, dless, NULL, dmasked, GrB_ALL, n, NULL);
// only use entries that were just updated
//LAGRAPH_OK (GrB_Vector_clear(dmasked));
//LAGRAPH_OK (GrB_apply(dmasked, dless, NULL, BF_Identity_Tuple3,
// d, NULL));
//try:
LAGr_assign(dmasked, dless, NULL, d, GrB_ALL, n, GrB_DESC_R);
}
iter ++;
}
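// Invariant sketch (annotation): after the t-th pass of the loop above, d(k)
// holds the best known <weight, hops, parent> tuple over paths from s to k
// that use at most t edges, while dmasked holds only the entries improved in
// the most recent pass.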
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (any_dless)
{
// execute semiring again to check for negative-weight cycle
LAGr_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL);
// dless = d .< dtmp
LAGr_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d,
NULL);
// if there is no entry with smaller distance then all shortest paths
// are found
LAGr_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless,
NULL) ;
if(any_dless)
{
// printf("A negative-weight cycle found. \n");
LAGRAPH_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
I = LAGraph_malloc (n, sizeof(GrB_Index)) ;
W = LAGraph_malloc (n, sizeof(BF_Tuple3_struct)) ;
w = LAGraph_malloc (n, sizeof(double)) ;
h = LAGraph_malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_malloc (n, sizeof(GrB_Index)) ;
if (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
// TODO: create 3 unary ops, and use GrB_apply?
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d));
for (GrB_Index k = 0; k < n; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGr_Vector_new(pd_output, GrB_FP64, n);
LAGr_Vector_new(ppi_output, GrB_UINT64, n);
LAGr_Vector_new(ph_output, GrB_UINT64, n);
LAGr_Vector_build (*pd_output , I, w , n, GrB_MIN_FP64 );
LAGr_Vector_build (*ppi_output, I, pi, n, GrB_MIN_UINT64);
LAGr_Vector_build (*ph_output , I, h , n, GrB_MIN_UINT64);
LAGRAPH_FREE_WORK;
return (GrB_SUCCESS) ;
}
|
dft.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdint.h>
#include <getopt.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <complex.h>
#include <fftw3.h>
#include <time.h>
#include "grid.h"
void image_dft(double complex *uvgrid, int grid_size, double lambda,
struct vis_data *vis, int iter){
int total_steps = (grid_size + iter - 1) / iter; // one step per outer-loop pass
int steps_completed = 0;
// stride the outer loop by iter as well (the hard-coded 10 looked like a
// leftover; the --iter option documents this sampling)
#pragma omp parallel for schedule(dynamic)
for (int y = 0; y < grid_size; y += iter){
// l and m are direction cosines; use double to avoid integer truncation to 0
double l = (y - grid_size / 2)/lambda;
for (int x = 0; x < grid_size; x += iter){
double m = (x - grid_size / 2)/lambda;
double real_p = 0;
double complex_p = 0;
for(int bl = 0; bl < vis->bl_count; ++bl){
for(int time = 0; time < vis->bl[bl].time_count; ++time){
for (int freq = 0; freq < vis->bl[bl].freq_count; ++freq){
double complex visibility = vis->bl[bl].vis[time*vis->bl[bl].freq_count + freq];
double subang1 = m * vis->bl[bl].uvw[time*vis->bl[bl].freq_count + freq];
double subang2 = l * vis->bl[bl].uvw[time*vis->bl[bl].freq_count + freq + 1];
double subang3 = (sqrt(1-l*l-m*m)-1) * vis->bl[bl].uvw[time*vis->bl[bl].freq_count + freq + 2];
// apply 2*pi to the full phase sum (the original multiplied only subang1,
// which looked like an operator-precedence slip)
double angle = 2 * M_PI * (subang1 + subang2 + subang3);
real_p += creal(visibility) * cos(angle) + cimag(visibility) * sin(angle);
complex_p += -creal(visibility) * sin(angle) + cimag(visibility) * cos(angle);
}
}
}
uvgrid[y*grid_size + x] = real_p + complex_p * I;
//printf("Progress: %d/%d \r",(y*grid_size + x),(grid_size*grid_size));
}
#pragma omp atomic
++steps_completed;
#pragma omp critical
printf("Progress: %d/%d \r",steps_completed,total_steps);
}
}
int main(int argc, char *argv[]){
//Structure for reporting memory usage:
struct rusage *rusage_cp = malloc(sizeof(struct rusage));
// Read parameters
static struct option options[] =
{
{"theta", required_argument, 0, 't' },
{"lambda", required_argument, 0, 'l' },
{"image", optional_argument, 0, 'i' },
{"min-bl", optional_argument, 0, 'b' },
{"max-bl", optional_argument, 0, 'B' },
{"iter", optional_argument, 0, 'I' },
{0, 0, 0, 0}
};
int option_index = 0;
double theta = 0, lambda = 0;
char *image_file = NULL;
double bl_min = DBL_MIN, bl_max = DBL_MAX;
int c; int invalid = 0;
long iter = 1;
while ((c = getopt_long(argc, argv, ":", options, &option_index)) != -1) {
switch(c) {
case 't': theta = atof(optarg); break;
case 'l': lambda = atof(optarg); break;
case 'i': image_file = optarg; break;
case 'b': bl_min = atof(optarg); break;
case 'B': bl_max = atof(optarg); break;
case 'I': iter = atol(optarg); break;
default: invalid = 1; break;
}
}
// Check grid parameters
int grid_size = (int)(theta * lambda);
size_t grid_byte_size = grid_size * grid_size * sizeof(double complex);
if (grid_size <= 0) {
fprintf(stderr, "Invalid grid configuration!\n");
invalid = 1;
}
// Must have an input file
const char *vis_file = 0;
if (optind + 1 == argc) {
vis_file = argv[optind];
} else {
printf("Please supply a visibility input file!\n");
invalid = 1;
}
if (invalid) {
printf("usage: %s --theta=THETA --lambda=LAM [--image=IMAGE]\n", argv[0]);
printf(" [--min-bl=MIN_BL] [--max-bl=MAX_BL]\n");
printf(" INPUT\n");
printf("\n");
printf("optional arguments:\n");
printf(" --theta=THETA Field of view size (in radians)\n");
printf(" --lambda=LAM uv grid size (in wavelenghts)\n");
printf(" --image=IMAGE image output file\n");
printf(" --min-bl=MIN_BL Minimum baseline length to consider (in km)\n");
printf(" --max-bl=MAX_BL Maximum baseline length to consider (in km)\n");
printf(" --iter=ITER Samples every +=ITER point in fourier space. Quickens DFT.\n");
printf("positional arguments:\n");
printf(" input input visibilities\n");
return 1;
}
// Initialise HDF5
init_dtype_cpx();
// Open files
struct vis_data vis;
int grid_fd = -1, image_fd = -1;
if (load_vis(vis_file, &vis, bl_min, bl_max)) {
return 1;
}
if (image_file) {
image_fd = open(image_file, O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (image_fd == -1) {
perror("Failed to open image file");
return 1;
}
}
// Allocate grid
printf("\nGrid size: %d x %d (%.2f GB)\n", grid_size, grid_size, (double)(grid_byte_size)/1000000000);
double complex *uvgrid = (double complex *)calloc(grid_byte_size, 1);
// Simple uniform weight (we re-use the grid to save an allocation)
printf("Weighting...\n");
weight((unsigned int *)uvgrid, grid_size, theta, &vis);
memset(uvgrid, 0, grid_size * grid_size * sizeof(unsigned int));
// Set up performance counters
struct perf_counters counters;
open_perf_counters(&counters);
// Start timer
struct timespec start_time;
clock_gettime(CLOCK_REALTIME, &start_time);
uint64_t flops = 0, mem = 0;
printf("Direct DFT...(this takes a LONG time)\n");
if(iter>1) printf("Sampling every %ld points in Fourier space (saves time).\n",iter);
// DFT HERE
image_dft(uvgrid, grid_size, lambda, &vis,iter);
struct timespec end_time;
clock_gettime(CLOCK_REALTIME, &end_time);
printf("\nGrid-Time: %.3f",
(double)(end_time.tv_sec - start_time.tv_sec) +
(double)(end_time.tv_nsec - start_time.tv_nsec) / 1000000000);
//Lets get some memory stats:
getrusage(RUSAGE_SELF, rusage_cp);
printf("\nMaximum Grid Memory: %.2f GB", (float)rusage_cp->ru_maxrss/(1024*1024));
// Show performance counters after gridding
printf("\nCounters:\n");
print_perf_counters(&counters, flops, mem);
// Make hermitian
printf("\nMake hermitian...\n");
make_hermitian(uvgrid, grid_size);
if (image_fd != -1) {
printf("FFT...\n");
// First shift zero frequency
fft_shift(uvgrid, grid_size);
// Do DFT. Complex-to-complex to keep with numpy (TODO: optimize)
fftw_plan plan;
plan = fftw_plan_dft_2d(grid_size, grid_size, uvgrid, uvgrid, -1, FFTW_ESTIMATE);
fftw_execute_dft(plan, uvgrid, uvgrid);
// Shift zero frequency back into centre
fft_shift(uvgrid, grid_size);
// Write real part to disk
printf("Write image...\n");
int i;
double *row = malloc(sizeof(double) * grid_size);
for (i = 0; i < grid_size; i++) {
int j;
for (j = 0; j < grid_size; j++) {
row[j] = creal(uvgrid[i*grid_size+j]);
}
write(image_fd, row, sizeof(double) * grid_size);
}
close(image_fd);
}
getrusage(RUSAGE_SELF, rusage_cp);
printf("\nMax Memory: %.2f GB", (float)rusage_cp->ru_maxrss/(1024*1024));
return 0;
}
|
GB_unaryop__lnot_int16_uint16.c
|
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int16_uint16
// op(A') function: GB_tran__lnot_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
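// For reference (annotation): GB_CAST_OP (pC, pA) expands to the equivalent of
//      uint16_t aij = Ax [pA] ;        // GB_GETA: fetch the input entry
//      int16_t  x   = (int16_t) aij ;  // GB_CASTING: cast to the op's input type
//      Cx [pC] = !(x != 0) ;           // GB_OP: apply the logical-not operator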
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int16_uint16
(
int16_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Teste1.c
|
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int nthreads, tid;
#pragma omp parallel private(nthreads, tid)
{
tid = omp_get_thread_num();
printf("Welcome to GFC from thread = %d\n", tid);
if(tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of THR: %d\n", nthreads);
}
}
    return 0;
}
|
residualbased_block_builder_and_solver.h
|
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/variable_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "utilities/builtin_timer.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBlockBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @tparam TSparseSpace The sparse system considered
* @tparam TDenseSpace The dense system considered
* @tparam TLinearSolver The linear solver considered
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Definition of the flags
KRATOS_DEFINE_LOCAL_FLAG( SILENT_WARNINGS );
// Scaling enum
enum class SCALING_DIAGONAL {NO_SCALING = 0, CONSIDER_NORM_DIAGONAL = 1, CONSIDER_MAX_DIAGONAL = 2, CONSIDER_PRESCRIBED_DIAGONAL = 3};
/// Definition of the pointer
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedBlockBuilderAndSolver() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedBlockBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor.
*/
explicit ResidualBasedBlockBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
}
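    // Minimal construction sketch (annotation; the space/solver typedefs are
    // assumptions following the usual Kratos aliases, not part of this header):
    //    using SparseSpaceType = UblasSpace<double, CompressedMatrix, Vector>;
    //    using LocalSpaceType  = UblasSpace<double, Matrix, Vector>;
    //    using SolverType      = LinearSolver<SparseSpaceType, LocalSpaceType>;
    //    auto p_builder_and_solver = Kratos::make_shared<
    //        ResidualBasedBlockBuilderAndSolver<SparseSpaceType, LocalSpaceType,
    //        SolverType>>(p_linear_solver);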
/** Destructor.
*/
~ResidualBasedBlockBuilderAndSolver() override
{
}
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) const override
{
return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
const auto timer = BuiltinTimer();
#pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation chosen, the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = rModelPart.ElementsBegin();
const auto it_cond_begin = rModelPart.ConditionsBegin();
// Contributions to the system
LocalSystemMatrixType lhs_contribution(0, 0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType equation_id;
// Assemble all elements
const auto timer = BuiltinTimer();
#pragma omp parallel firstprivate(nelements, nconditions, lhs_contribution, equation_id )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_elem, lhs_contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, lhs_contribution, equation_id);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active)
{
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_cond, lhs_contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, lhs_contribution, equation_id);
}
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time LHS: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished parallel building LHS" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
KRATOS_TRY
TSystemVectorType tmp(A.size1(), 0.0);
this->Build(pScheme, rModelPart, A, tmp);
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
if(mT.size1() != 0) //if there are master-slave constraints
{
//recover solution of the original problem
TSystemVectorType Dxmodified = Dx;
TSparseSpace::Mult(mT, Dxmodified, Dx);
}
//prints information about the linear solver used
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
* @param rModelPart The model part of the problem to solve
*/
virtual void SystemSolveWithPhysics(
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
ModelPart& rModelPart
)
{
if(rModelPart.MasterSlaveConstraints().size() != 0) {
TSystemVectorType Dxmodified(rb.size());
InternalSystemSolveWithPhysics(rA, Dxmodified, rb, rModelPart);
//recover solution of the original problem
TSparseSpace::Mult(mT, Dxmodified, rDx);
} else {
InternalSystemSolveWithPhysics(rA, rDx, rb, rModelPart);
}
}
/**
* @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void InternalSystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00) {
//provide physical data as needed
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
} else {
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("ResidualBasedBlockBuilderAndSolver", mOptions.IsNot(SILENT_WARNINGS)) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints information about the linear solver used
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safest function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
Timer::Start("Build");
Build(pScheme, rModelPart, A, b);
Timer::Stop("Build");
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyConstraints");
ApplyConstraints(pScheme, rModelPart, A, b);
Timer::Stop("ApplyConstraints");
}
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(A, Dx, b, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time Linearizing with the database at the old iteration
* @details It is ideally the fastest and safest function to use when it is possible to solve just after building
* @param pScheme The pointer to the integration scheme
* @param rModelPart The model part to compute
* @param rA The LHS matrix of the system of equations
* @param rDx The vector of unknowns
* @param rb The RHS vector of the system of equations
* @param MoveMesh tells if the update of the scheme needs to be performed when calling the Update of the scheme
*/
void BuildAndSolveLinearizedOnPreviousIteration(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
const bool MoveMesh
) override
{
KRATOS_INFO_IF("BlockBuilderAndSolver", this->GetEchoLevel() > 0)
<< "Linearizing on Old iteration" << std::endl;
KRATOS_ERROR_IF(rModelPart.GetBufferSize() == 1) << "BlockBuilderAndSolver: \n"
<< "The buffer size needs to be at least 2 in order to use \n"
<< "BuildAndSolveLinearizedOnPreviousIteration \n"
<< "current buffer size for modelpart: " << rModelPart.Name() << std::endl
<< "is :" << rModelPart.GetBufferSize()
<< " Please set IN THE STRATEGY SETTINGS "
<< " UseOldStiffnessInFirstIteration=false " << std::endl;
DofsArrayType fixed_dofs;
for(auto& r_dof : BaseType::mDofSet){
if(r_dof.IsFixed()){
fixed_dofs.push_back(&r_dof);
r_dof.FreeDof();
}
}
//TODO: Here we need to take the vector from other ones because
// We cannot create a trilinos vector without a communicator. To be improved!
TSystemVectorType dx_prediction(rDx);
TSystemVectorType rhs_addition(rb); //we know it is zero here, so we do not need to set it
// Here we bring back the database to before the prediction,
// but we store the prediction increment in dx_prediction.
// The goal is that the stiffness is computed with the
// converged configuration at the end of the previous step.
const auto it_dof_begin = BaseType::mDofSet.begin();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) {
auto it_dof = it_dof_begin + i;
//NOTE: this is initialized to minus the value of the dx prediction
dx_prediction[it_dof->EquationId()] = -(it_dof->GetSolutionStepValue() - it_dof->GetSolutionStepValue(1));
}
// Use UpdateDatabase to bring back the solution to how it was at the end of the previous step
pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
if (MoveMesh) {
VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
}
this->Build(pScheme, rModelPart, rA, rb);
// Put back the prediction into the database
TSparseSpace::InplaceMult(dx_prediction, -1.0); //change sign to dx_prediction
TSparseSpace::UnaliasedAdd(rDx, 1.0, dx_prediction);
// Use UpdateDatabase to bring back the solution
// to where it was taking into account BCs
// it is done here so that constraints are correctly taken into account right after
pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
if (MoveMesh) {
VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
}
// Apply rb -= A*dx_prediction
TSparseSpace::Mult(rA, dx_prediction, rhs_addition);
TSparseSpace::UnaliasedAdd(rb, -1.0, rhs_addition);
for(auto& dof : fixed_dofs)
dof.FixDof();
if (!rModelPart.MasterSlaveConstraints().empty()) {
this->ApplyConstraints(pScheme, rModelPart, rA, rb);
}
this->ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
this->SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
}
/**
* @brief Corresponds to the previous function, but the system's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
BuildRHS(pScheme, rModelPart, rb);
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyRHSConstraints");
ApplyRHSConstraints(pScheme, rModelPart, rb);
Timer::Stop("ApplyRHSConstraints");
}
ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b) override
{
KRATOS_TRY
Timer::Start("BuildRHS");
BuildRHSNoDirichlet(pScheme,rModelPart,b);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++)
{
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
const std::size_t i = dof_iterator->EquationId();
if (dof_iterator->IsFixed())
b[i] = 0.0;
}
Timer::Stop("BuildRHS");
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& r_elements_array = rModelPart.Elements();
const int number_of_elements = static_cast<int>(r_elements_array.size());
DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
unsigned int nthreads = ParallelUtilities::GetNumThreads();
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
/**
* Here we declare two sets:
* - The global set: contains all the DoFs of the system
* - The slave set: the DoFs that are not going to be solved, due to the MPC formulation
*/
set_type dof_global_set;
dof_global_set.reserve(number_of_elements*20);
#pragma omp parallel firstprivate(dof_list, second_dof_list)
{
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// We create the temporary set and reserve some space in it
set_type dofs_tmp_set;
dofs_tmp_set.reserve(20000);
// Gets the array of elements from the modeler
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_elements; ++i) {
auto it_elem = r_elements_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of conditions from the modeler
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const int number_of_conditions = static_cast<int>(r_conditions_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_conditions; ++i) {
auto it_cond = r_conditions_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of constraints from the modeler
auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
const int number_of_constraints = static_cast<int>(r_constraints_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_constraints; ++i) {
auto it_const = r_constraints_array.begin() + i;
// Gets list of Dof involved on every element
it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
}
// We merge all the sets in one thread
#pragma omp critical
{
dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dof_global_set.size());
for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
{
Doftemp.push_back( *it );
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
//Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
#ifdef KRATOS_DEBUG
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
if (BaseType::GetCalculateReactionsFlag()) {
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
//int free_id = 0;
BaseType::mEquationSystemSize = BaseType::mDofSet.size();
int ndofs = static_cast<int>(BaseType::mDofSet.size());
#pragma omp parallel for firstprivate(ndofs)
for (int i = 0; i < static_cast<int>(ndofs); i++) {
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i;
dof_iterator->SetEquationId(i);
}
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permitted."<<std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
TSparseSpace::SetToZero(Dx);
if (b.size() != BaseType::mEquationSystemSize) {
b.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(b);
ConstructMasterSlaveConstraintsStructure(rModelPart);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
KRATOS_TRY
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Computing constraints
const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
for (int k = 0; k < n_constraints; ++k) {
auto it = constraints_begin + k;
it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids.
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Computing constraints
const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
for (int k = 0; k < n_constraints; ++k) {
auto it = constraints_begin + k;
it->FinalizeSolutionStep(r_process_info);
}
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
TSparseSpace::SetToZero(b);
//refresh RHS to have the correct reactions
BuildRHSNoDirichlet(pScheme, rModelPart, b);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++) {
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
const int i = (dof_iterator)->EquationId();
(dof_iterator)->GetSolutionStepReactionValue() = -b[i];
}
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* inexpensive depending on the implementation chosen and on how the System Matrix is built.
* @details For an explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
const std::size_t system_size = rA.size1();
Vector scaling_factors (system_size);
const auto it_dof_iterator_begin = BaseType::mDofSet.begin();
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
// NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++) {
auto it_dof_iterator = it_dof_iterator_begin + k;
if (it_dof_iterator->IsFixed()) {
scaling_factors[k] = 0.0;
} else {
scaling_factors[k] = 1.0;
}
}
double* Avalues = rA.value_data().begin();
std::size_t* Arow_indices = rA.index1_data().begin();
std::size_t* Acol_indices = rA.index2_data().begin();
// The diagonal considered
mScaleFactor = GetScaleNorm(rModelPart, rA);
// Detect if there is a line of all zeros and set the diagonal to a 1 if this happens
#pragma omp parallel firstprivate(system_size)
{
std::size_t col_begin = 0, col_end = 0;
bool empty = true;
#pragma omp for
for (int k = 0; k < static_cast<int>(system_size); ++k) {
col_begin = Arow_indices[k];
col_end = Arow_indices[k + 1];
empty = true;
for (std::size_t j = col_begin; j < col_end; ++j) {
if(Avalues[j] != 0.0) {
empty = false;
break;
}
}
if(empty) {
rA(k, k) = mScaleFactor;
rb[k] = 0.0;
}
}
}
#pragma omp parallel for firstprivate(system_size)
for (int k = 0; k < static_cast<int>(system_size); ++k) {
std::size_t col_begin = Arow_indices[k];
std::size_t col_end = Arow_indices[k+1];
const double k_factor = scaling_factors[k];
if (k_factor == 0.0) {
// Zero out the whole row, except the diagonal
for (std::size_t j = col_begin; j < col_end; ++j)
if (static_cast<int>(Acol_indices[j]) != k )
Avalues[j] = 0.0;
// Zero out the RHS
rb[k] = 0.0;
} else {
                // Zero out the column which is associated with the zeroed row
for (std::size_t j = col_begin; j < col_end; ++j)
if(scaling_factors[ Acol_indices[j] ] == 0 )
Avalues[j] = 0.0;
}
}
}
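        // Illustrative sketch only (not part of the Kratos API; the name and the raw
        // CSR arguments are hypothetical): the elimination performed above, written
        // against plain CSR arrays. For a fixed DOF the row is reduced to its
        // diagonal and the matching column entries are zeroed in every free row, so
        // the solver returns Dx[fixed] = 0 while the system stays symmetric.
        static void ApplyFixedDofsCsrSketch(
            double* values, const std::size_t* row_ptr, const std::size_t* col_idx,
            double* rhs, const double* scaling, const std::size_t n)
        {
            for (std::size_t row = 0; row < n; ++row) {
                const bool row_is_fixed = (scaling[row] == 0.0);
                for (std::size_t j = row_ptr[row]; j < row_ptr[row + 1]; ++j) {
                    if (row_is_fixed) {
                        if (col_idx[j] != row) values[j] = 0.0; // keep only the diagonal
                    } else if (scaling[col_idx[j]] == 0.0) {
                        values[j] = 0.0; // column associated with a fixed DOF
                    }
                }
                if (row_is_fixed) rhs[row] = 0.0; // so the solve returns Dx[row] = 0
            }
        }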
/**
* @brief Applies the constraints with master-slave relation matrix (RHS only)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rb The RHS vector
*/
void ApplyRHSConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& rb
) override
{
KRATOS_TRY
if (rModelPart.MasterSlaveConstraints().size() != 0) {
BuildMasterSlaveConstraints(rModelPart);
// We compute the transposed matrix of the global relation matrix
TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
TSystemVectorType b_modified(rb.size());
TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
TSparseSpace::Copy(b_modified, rb);
// Apply diagonal values on slaves
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
const IndexType slave_equation_id = mSlaveIds[i];
if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
rb[slave_equation_id] = 0.0;
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Applies the constraints with master-slave relation matrix
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rb The RHS vector
*/
void ApplyConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rb
) override
{
KRATOS_TRY
if (rModelPart.MasterSlaveConstraints().size() != 0) {
BuildMasterSlaveConstraints(rModelPart);
// We compute the transposed matrix of the global relation matrix
TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
TSystemVectorType b_modified(rb.size());
TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
TSparseSpace::Copy(b_modified, rb);
TSystemMatrixType auxiliar_A_matrix(mT.size2(), rA.size2());
SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = T_transpose * rA
T_transpose_matrix.resize(0, 0, false); //free memory
                SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, mT, rA); //A = auxiliar * T. NOTE: here we are overwriting the old A matrix!
auxiliar_A_matrix.resize(0, 0, false); //free memory
const double max_diag = GetMaxDiagonal(rA);
// Apply diagonal values on slaves
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
const IndexType slave_equation_id = mSlaveIds[i];
if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
rA(slave_equation_id, slave_equation_id) = max_diag;
rb[slave_equation_id] = 0.0;
}
}
}
KRATOS_CATCH("")
}
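        // Background for ApplyConstraints/ApplyRHSConstraints above (the standard
        // master-slave elimination): with the relation u = T * u_m + C, the
        // constrained system is obtained as
        //     A' = T^T * A * T,      b' = T^T * b,
        // after which every active slave row s gets A'(s,s) = max_diag and
        // b'[s] = 0, so the otherwise empty slave equations keep the system
        // non-singular and reasonably conditioned.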
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
BaseType::Clear();
mSlaveIds.clear();
mMasterIds.clear();
mInactiveSlaveDofs.clear();
mT.resize(0,0,false);
mConstantVector.resize(0,false);
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "block_builder_and_solver",
"block_builder" : true,
"diagonal_values_for_dirichlet_dofs" : "use_max_diagonal",
"silent_warnings" : false
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "block_builder_and_solver";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
        TSystemMatrixType mT;              /// This is the matrix containing the global relation for the constraints
        TSystemVectorType mConstantVector; /// This is the vector containing the rigid movement of the constraints
        std::vector<IndexType> mSlaveIds;  /// The equation ids of the slaves
        std::vector<IndexType> mMasterIds; /// The equation ids of the masters
        std::unordered_set<IndexType> mInactiveSlaveDofs; /// The set containing the inactive slave dofs
        double mScaleFactor = 1.0;         /// The manually set scale factor
        SCALING_DIAGONAL mScalingDiagonal; /// The scaling considered for the Dirichlet dofs
Flags mOptions; /// Some flags used internally
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void BuildRHSNoDirichlet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b)
{
KRATOS_TRY
//Getting the Elements
ElementsArrayType& pElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = rModelPart.Conditions();
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
//for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
const int nelements = static_cast<int>(pElements.size());
#pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
{
#pragma omp for schedule(guided, 512) nowait
for (int i=0; i<nelements; i++) {
typename ElementsArrayType::iterator it = pElements.begin() + i;
                //detect if the element is active or not. If the user did not make any choice,
                //the element is active by default
bool element_is_active = true;
if( (it)->IsDefined(ACTIVE) ) {
element_is_active = (it)->Is(ACTIVE);
}
if(element_is_active) {
//calculate elemental Right Hand Side Contribution
pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
LHS_Contribution.resize(0, 0, false);
RHS_Contribution.resize(0, false);
// assemble all conditions
const int nconditions = static_cast<int>(ConditionsArray.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i<nconditions; i++) {
auto it = ConditionsArray.begin() + i;
                //detect if the condition is active or not. If the user did not make any choice,
                //the condition is active by default
bool condition_is_active = true;
if( (it)->IsDefined(ACTIVE) ) {
condition_is_active = (it)->Is(ACTIVE);
}
if(condition_is_active) {
                    //calculate the condition's Right Hand Side contribution
pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
}
KRATOS_CATCH("")
}
virtual void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
if (rModelPart.MasterSlaveConstraints().size() > 0) {
Timer::Start("ConstraintsRelationMatrixStructure");
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Vector containing the localization in the system of the different terms
DofsVectorType slave_dof_list, master_dof_list;
// Constraint initial iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());
std::vector<LockObject> lock_array(indices.size());
#pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
{
Element::EquationIdVectorType slave_ids(3);
Element::EquationIdVectorType master_ids(3);
std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;
#pragma omp for schedule(guided, 512) nowait
for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
auto it_const = it_const_begin + i_const;
                        // Detect if the constraint is active or not. If the user did not make
                        // any choice, the constraint is active by default
bool constraint_is_active = true;
if( it_const->IsDefined(ACTIVE) ) {
constraint_is_active = it_const->Is(ACTIVE);
}
if(constraint_is_active) {
it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);
// Slave DoFs
for (auto &id_i : slave_ids) {
temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
}
}
}
                // Merging all the temporary index sets. Note that temp_indices is an
                // unordered_map keyed by slave equation id, so we iterate its entries
                // instead of assuming the keys run from 0 to size()-1
                for (auto& r_pair : temp_indices) {
                    lock_array[r_pair.first].SetLock();
                    indices[r_pair.first].insert(r_pair.second.begin(), r_pair.second.end());
                    lock_array[r_pair.first].UnSetLock();
                }
}
mSlaveIds.clear();
mMasterIds.clear();
for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
if (indices[i].size() == 0) // Master dof!
mMasterIds.push_back(i);
else // Slave dof
mSlaveIds.push_back(i);
indices[i].insert(i); // Ensure that the diagonal is there in T
}
// Count the row sizes
std::size_t nnz = 0;
for (IndexType i = 0; i < indices.size(); ++i)
nnz += indices[i].size();
mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
mConstantVector.resize(indices.size(), false);
double *Tvalues = mT.value_data().begin();
IndexType *Trow_indices = mT.index1_data().begin();
IndexType *Tcol_indices = mT.index2_data().begin();
// Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Trow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(mT.size1()); i++)
Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mT.size1()); ++i) {
const IndexType row_begin = Trow_indices[i];
const IndexType row_end = Trow_indices[i + 1];
IndexType k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
Tcol_indices[k] = *it;
Tvalues[k] = 0.0;
k++;
}
indices[i].clear(); //deallocating the memory
std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
}
mT.set_filled(indices.size() + 1, nnz);
Timer::Stop("ConstraintsRelationMatrixStructure");
}
}
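        // The resulting sparsity of T: one row per equation. A pure master row m
        // only holds its reserved diagonal (later set to T(m,m) = 1 in
        // BuildMasterSlaveConstraints), while a slave row s holds one entry per
        // master it depends on plus its own diagonal; e.g. for
        // u_s = 0.5*u_m1 + 0.5*u_m2 the row s carries entries at columns m1, m2
        // and s. Reserving the diagonal here means an inactive constraint can
        // later be neutralized by writing T(s,s) = 1 without changing the pattern.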
virtual void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
KRATOS_TRY
TSparseSpace::SetToZero(mT);
TSparseSpace::SetToZero(mConstantVector);
// The current process info
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Vector containing the localization in the system of the different terms
DofsVectorType slave_dof_list, master_dof_list;
// Contributions to the system
Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
Vector constant_vector = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType slave_equation_ids, master_equation_ids;
const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
// We clear the set
mInactiveSlaveDofs.clear();
#pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
{
std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;
#pragma omp for schedule(guided, 512)
for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;
                    // Detect if the constraint is active or not. If the user did not make
                    // any choice, the constraint is active by default
bool constraint_is_active = true;
if (it_const->IsDefined(ACTIVE))
constraint_is_active = it_const->Is(ACTIVE);
if (constraint_is_active) {
it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
const IndexType i_global = slave_equation_ids[i];
// Assemble matrix row
AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);
// Assemble constant vector
const double constant_value = constant_vector[i];
double& r_value = mConstantVector[i_global];
#pragma omp atomic
r_value += constant_value;
}
} else { // Taking into account inactive constraints
it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
}
}
// We merge all the sets in one thread
#pragma omp critical
{
mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
}
}
// Setting the master dofs into the T and C system
for (auto eq_id : mMasterIds) {
mConstantVector[eq_id] = 0.0;
mT(eq_id, eq_id) = 1.0;
}
// Setting inactive slave dofs in the T and C system
for (auto eq_id : mInactiveSlaveDofs) {
mConstantVector[eq_id] = 0.0;
mT(eq_id, eq_id) = 1.0;
}
KRATOS_CATCH("")
}
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ModelPart& rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
std::vector< LockObject > lock_array(equation_size);
std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
indices[iii].reserve(40);
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nelements, ids)
for (int iii=0; iii<nelements; iii++) {
typename ElementsContainerType::iterator i_element = el_begin + iii;
pScheme->EquationId(*i_element, ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++) {
lock_array[ids[i]].SetLock();
auto& row_indices = indices[ids[i]];
row_indices.insert(ids.begin(), ids.end());
lock_array[ids[i]].UnSetLock();
}
}
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii<nconditions; iii++) {
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->EquationId(*i_condition, ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++) {
lock_array[ids[i]].SetLock();
auto& row_indices = indices[ids[i]];
row_indices.insert(ids.begin(), ids.end());
lock_array[ids[i]].UnSetLock();
}
}
if (rModelPart.MasterSlaveConstraints().size() != 0) {
Element::EquationIdVectorType master_ids(3, 0);
Element::EquationIdVectorType slave_ids(3, 0);
            const int nmasterSlaveConstraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
const auto const_begin = rModelPart.MasterSlaveConstraints().begin();
#pragma omp parallel for firstprivate(nmasterSlaveConstraints, slave_ids, master_ids)
for (int iii = 0; iii<nmasterSlaveConstraints; ++iii) {
auto i_const = const_begin + iii;
i_const->EquationIdVector(slave_ids, master_ids, CurrentProcessInfo);
for (std::size_t i = 0; i < slave_ids.size(); i++) {
lock_array[slave_ids[i]].SetLock();
auto& row_indices = indices[slave_ids[i]];
row_indices.insert(slave_ids[i]);
lock_array[slave_ids[i]].UnSetLock();
}
for (std::size_t i = 0; i < master_ids.size(); i++) {
lock_array[master_ids[i]].SetLock();
auto& row_indices = indices[master_ids[i]];
row_indices.insert(master_ids[i]);
lock_array[master_ids[i]].UnSetLock();
}
}
}
//destroy locks
lock_array = std::vector< LockObject >();
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++) {
nnz += indices[i].size();
}
A = CompressedMatrixType(indices.size(), indices.size(), nnz);
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++) {
Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++) {
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i+1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
indices[i].clear(); //deallocating the memory
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size()+1, nnz);
Timer::Stop("MatrixStructure");
}
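        // ConstructMatrixStructure above follows a three-stage recipe: (1) per-row
        // unordered_sets, guarded by one LockObject per equation, collect the
        // coupled equation ids from elements, conditions and constraints; (2) a
        // serial prefix sum over the set sizes fills the CSR index1 array (hence
        // the DO-NOT-PARALLELIZE note); (3) a parallel pass copies each row's
        // column ids, sorts them, and set_filled() finalizes the CSR storage.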
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++) {
unsigned int i_global = EquationId[i_local];
double& r_a = b[i_global];
const double& v_a = RHS_Contribution(i_local);
#pragma omp atomic
r_a += v_a;
AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
}
}
//**************************************************************************
void AssembleLHS(
TSystemMatrixType& rA,
const LocalSystemMatrixType& rLHSContribution,
Element::EquationIdVectorType& rEquationId
)
{
const SizeType local_size = rLHSContribution.size1();
for (IndexType i_local = 0; i_local < local_size; i_local++) {
const IndexType i_global = rEquationId[i_local];
AssembleRowContribution(rA, rLHSContribution, i_global, i_local, rEquationId);
}
}
//**************************************************************************
void AssembleRHS(
TSystemVectorType& b,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
for (unsigned int i_local = 0; i_local < local_size; i_local++) {
unsigned int i_global = EquationId[i_local];
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
double* values_vector = A.value_data().begin();
std::size_t* index1_vector = A.index1_data().begin();
std::size_t* index2_vector = A.index2_data().begin();
size_t left_limit = index1_vector[i];
// size_t right_limit = index1_vector[i+1];
//find the first entry
size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
size_t last_found = EquationId[0];
double& r_a = values_vector[last_pos];
const double& v_a = Alocal(i_local,0);
#pragma omp atomic
r_a += v_a;
//now find all of the other entries
size_t pos = 0;
for (unsigned int j=1; j<EquationId.size(); j++) {
unsigned int id_to_find = EquationId[j];
if(id_to_find > last_found) {
pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
} else if(id_to_find < last_found) {
pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
} else {
pos = last_pos;
}
double& r = values_vector[pos];
const double& v = Alocal(i_local,j);
#pragma omp atomic
r += v;
last_found = id_to_find;
last_pos = pos;
}
}
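        // AssembleRowContribution exploits two properties of the CSR structure
        // built above: the column indices of each row are sorted, and consecutive
        // equation ids of one local contribution are usually close together. It
        // therefore remembers the position of the previous hit (last_pos) and
        // probes linearly forward or backward from there (ForwardFind/BackwardFind
        // below) instead of doing a binary search per entry, while the omp atomic
        // makes the += safe when several elements assemble into the same row.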
/**
* @brief This method returns the scale norm considering for scaling the diagonal
* @param rModelPart The problem model part
* @param rA The LHS matrix
* @return The scale norm
*/
double GetScaleNorm(
ModelPart& rModelPart,
TSystemMatrixType& rA
)
{
switch (mScalingDiagonal) {
case SCALING_DIAGONAL::NO_SCALING:
return 1.0;
case SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL: {
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
KRATOS_ERROR_IF_NOT(r_current_process_info.Has(BUILD_SCALE_FACTOR)) << "Scale factor not defined at process info" << std::endl;
return r_current_process_info.GetValue(BUILD_SCALE_FACTOR);
}
case SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL:
return GetDiagonalNorm(rA)/static_cast<double>(rA.size1());
case SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL:
return GetMaxDiagonal(rA);
// return TSparseSpace::TwoNorm(rA)/static_cast<double>(rA.size1());
default:
return GetMaxDiagonal(rA);
}
}
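        // In particular, the "use_diagonal_norm" option above scales the Dirichlet
        // diagonal with
        //     scale = ||diag(A)||_2 / n = sqrt(sum_i A(i,i)^2) / n,
        // a cheap average-magnitude proxy for the full matrix norm kept in the
        // commented-out TwoNorm alternative.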
/**
* @brief This method returns the diagonal norm considering for scaling the diagonal
* @param rA The LHS matrix
* @return The diagonal norm
*/
double GetDiagonalNorm(TSystemMatrixType& rA)
{
double diagonal_norm = 0.0;
#pragma omp parallel for reduction(+:diagonal_norm)
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
diagonal_norm += std::pow(rA(i,i), 2);
}
return std::sqrt(diagonal_norm);
}
/**
         * @brief This method returns the average of the max and min diagonal values
         * @param rA The LHS matrix
         * @return The average diagonal value
*/
double GetAveragevalueDiagonal(TSystemMatrixType& rA)
{
return 0.5 * (GetMaxDiagonal(rA) + GetMinDiagonal(rA));
}
/**
* @brief This method returns the diagonal max value
* @param rA The LHS matrix
* @return The diagonal max value
*/
double GetMaxDiagonal(TSystemMatrixType& rA)
{
// // NOTE: Reduction failing in MSVC
// double max_diag = 0.0;
// #pragma omp parallel for reduction(max:max_diag)
// for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
// max_diag = std::max(max_diag, std::abs(rA(i,i)));
// }
// return max_diag;
// Creating a buffer for parallel vector fill
const int num_threads = ParallelUtilities::GetNumThreads();
Vector max_vector(num_threads, 0.0);
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
const int id = OpenMPUtils::ThisThread();
const double abs_value_ii = std::abs(rA(i,i));
if (abs_value_ii > max_vector[id])
max_vector[id] = abs_value_ii;
}
double max_diag = 0.0;
for(int i = 0; i < num_threads; ++i) {
max_diag = std::max(max_diag, max_vector[i]);
}
return max_diag;
}
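        // The buffer above is a manual OpenMP reduction: each thread writes only
        // to its own slot max_vector[OpenMPUtils::ThisThread()], so the parallel
        // loop needs no synchronization, and a short serial pass combines the
        // num_threads partial maxima. GetMinDiagonal below mirrors the same
        // pattern; both sidestep the MSVC max/min reduction issue noted in the
        // commented-out code.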
/**
* @brief This method returns the diagonal min value
* @param rA The LHS matrix
* @return The diagonal min value
*/
double GetMinDiagonal(TSystemMatrixType& rA)
{
// // NOTE: Reduction failing in MSVC
// double min_diag = std::numeric_limits<double>::max();
// #pragma omp parallel for reduction(min:min_diag)
// for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
// min_diag = std::min(min_diag, std::abs(rA(i,i)));
// }
// return min_diag;
// Creating a buffer for parallel vector fill
const int num_threads = ParallelUtilities::GetNumThreads();
Vector min_vector(num_threads, std::numeric_limits<double>::max());
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
const int id = OpenMPUtils::ThisThread();
const double abs_value_ii = std::abs(rA(i,i));
if (abs_value_ii < min_vector[id])
min_vector[id] = abs_value_ii;
}
double min_diag = std::numeric_limits<double>::max();
for(int i = 0; i < num_threads; ++i) {
min_diag = std::min(min_diag, min_vector[i]);
}
return min_diag;
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
        // Setting flags
const std::string& r_diagonal_values_for_dirichlet_dofs = ThisParameters["diagonal_values_for_dirichlet_dofs"].GetString();
std::set<std::string> available_options_for_diagonal = {"no_scaling","use_max_diagonal","use_diagonal_norm","defined_in_process_info"};
if (available_options_for_diagonal.find(r_diagonal_values_for_dirichlet_dofs) == available_options_for_diagonal.end()) {
std::stringstream msg;
msg << "Currently prescribed diagonal values for dirichlet dofs : " << r_diagonal_values_for_dirichlet_dofs << "\n";
msg << "Admissible values for the diagonal scaling are : no_scaling, use_max_diagonal, use_diagonal_norm, or defined_in_process_info" << "\n";
KRATOS_ERROR << msg.str() << std::endl;
}
// The first option will not consider any scaling (the diagonal values will be replaced with 1)
if (r_diagonal_values_for_dirichlet_dofs == "no_scaling") {
mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
} else if (r_diagonal_values_for_dirichlet_dofs == "use_max_diagonal") {
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL;
} else if (r_diagonal_values_for_dirichlet_dofs == "use_diagonal_norm") { // On this case the norm of the diagonal will be considered
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL;
} else { // Otherwise we will assume we impose a numerical value
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL;
}
mOptions.Set(SILENT_WARNINGS, ThisParameters["silent_warnings"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate) {
i++;
}
if (i == endit) {
v.push_back(candidate);
}
}
//******************************************************************************************
//******************************************************************************************
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++) {
partitions[i] = partitions[i - 1] + partition_size;
}
}
inline unsigned int ForwardFind(const unsigned int id_to_find,
const unsigned int start,
const size_t* index_vector)
{
unsigned int pos = start;
while(id_to_find != index_vector[pos]) pos++;
return pos;
}
inline unsigned int BackwardFind(const unsigned int id_to_find,
const unsigned int start,
const size_t* index_vector)
{
unsigned int pos = start;
while(id_to_find != index_vector[pos]) pos--;
return pos;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpace, class TDenseSpace, class TLinearSolver>
const Kratos::Flags ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::SILENT_WARNINGS(Kratos::Flags::Create(0));
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
box2d3r.c
|
#define BENCH_DIM 2
#define BENCH_FPP 97
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
#pragma scop
for (int t = 0; t < timestep; t++)
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.01530f * A[t%2][i-3][j-3] +
0.01531f * A[t%2][i-3][j-2] +
0.01532f * A[t%2][i-3][j-1] +
0.01533f * A[t%2][i-3][j] +
0.01534f * A[t%2][i-3][j+1] +
0.01535f * A[t%2][i-3][j+2] +
0.01536f * A[t%2][i-3][j+3] +
0.01537f * A[t%2][i-2][j-3] +
0.01538f * A[t%2][i-2][j-2] +
0.01539f * A[t%2][i-2][j-1] +
0.01540f * A[t%2][i-2][j] +
0.01541f * A[t%2][i-2][j+1] +
0.01542f * A[t%2][i-2][j+2] +
0.01543f * A[t%2][i-2][j+3] +
0.01544f * A[t%2][i-1][j-3] +
0.01545f * A[t%2][i-1][j-2] +
0.01546f * A[t%2][i-1][j-1] +
0.01546f * A[t%2][i-1][j] +
0.01547f * A[t%2][i-1][j+1] +
0.01548f * A[t%2][i-1][j+2] +
0.01549f * A[t%2][i-1][j+3] +
0.01550f * A[t%2][i][j-3] +
0.01551f * A[t%2][i][j-2] +
0.01552f * A[t%2][i][j-1] +
0.25424f * A[t%2][i][j] +
0.01554f * A[t%2][i][j+1] +
0.01555f * A[t%2][i][j+2] +
0.01556f * A[t%2][i][j+3] +
0.01557f * A[t%2][i+1][j-3] +
0.01558f * A[t%2][i+1][j-2] +
0.01559f * A[t%2][i+1][j-1] +
0.01560f * A[t%2][i+1][j] +
0.01561f * A[t%2][i+1][j+1] +
0.01562f * A[t%2][i+1][j+2] +
0.01564f * A[t%2][i+1][j+3] +
0.01565f * A[t%2][i+2][j-3] +
0.01566f * A[t%2][i+2][j-2] +
0.01567f * A[t%2][i+2][j-1] +
0.01568f * A[t%2][i+2][j] +
0.01569f * A[t%2][i+2][j+1] +
0.01570f * A[t%2][i+2][j+2] +
0.01571f * A[t%2][i+2][j+3] +
0.01572f * A[t%2][i+3][j-3] +
0.01573f * A[t%2][i+3][j-2] +
0.01574f * A[t%2][i+3][j-1] +
0.01575f * A[t%2][i+3][j] +
0.01576f * A[t%2][i+3][j+1] +
0.01577f * A[t%2][i+3][j+2] +
0.01578f * A[t%2][i+3][j+3];
#pragma endscop
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.01530f * A[t%2][i-3][j-3] +
0.01531f * A[t%2][i-3][j-2] +
0.01532f * A[t%2][i-3][j-1] +
0.01533f * A[t%2][i-3][j] +
0.01534f * A[t%2][i-3][j+1] +
0.01535f * A[t%2][i-3][j+2] +
0.01536f * A[t%2][i-3][j+3] +
0.01537f * A[t%2][i-2][j-3] +
0.01538f * A[t%2][i-2][j-2] +
0.01539f * A[t%2][i-2][j-1] +
0.01540f * A[t%2][i-2][j] +
0.01541f * A[t%2][i-2][j+1] +
0.01542f * A[t%2][i-2][j+2] +
0.01543f * A[t%2][i-2][j+3] +
0.01544f * A[t%2][i-1][j-3] +
0.01545f * A[t%2][i-1][j-2] +
0.01546f * A[t%2][i-1][j-1] +
0.01546f * A[t%2][i-1][j] +
0.01547f * A[t%2][i-1][j+1] +
0.01548f * A[t%2][i-1][j+2] +
0.01549f * A[t%2][i-1][j+3] +
0.01550f * A[t%2][i][j-3] +
0.01551f * A[t%2][i][j-2] +
0.01552f * A[t%2][i][j-1] +
0.25424f * A[t%2][i][j] +
0.01554f * A[t%2][i][j+1] +
0.01555f * A[t%2][i][j+2] +
0.01556f * A[t%2][i][j+3] +
0.01557f * A[t%2][i+1][j-3] +
0.01558f * A[t%2][i+1][j-2] +
0.01559f * A[t%2][i+1][j-1] +
0.01560f * A[t%2][i+1][j] +
0.01561f * A[t%2][i+1][j+1] +
0.01562f * A[t%2][i+1][j+2] +
0.01564f * A[t%2][i+1][j+3] +
0.01565f * A[t%2][i+2][j-3] +
0.01566f * A[t%2][i+2][j-2] +
0.01567f * A[t%2][i+2][j-1] +
0.01568f * A[t%2][i+2][j] +
0.01569f * A[t%2][i+2][j+1] +
0.01570f * A[t%2][i+2][j+2] +
0.01571f * A[t%2][i+2][j+3] +
0.01572f * A[t%2][i+3][j-3] +
0.01573f * A[t%2][i+3][j-2] +
0.01574f * A[t%2][i+3][j-1] +
0.01575f * A[t%2][i+3][j] +
0.01576f * A[t%2][i+3][j+1] +
0.01577f * A[t%2][i+3][j+2] +
0.01578f * A[t%2][i+3][j+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
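/* Hypothetical driver sketch (the real harness lives in common.h, which we
   assume supplies SB_TYPE and sb_time()): the kernel ping-pongs between the
   two time planes A[0] and A[1], so A1 must hold 2*dimsize*dimsize values and
   the result of the final step ends up in plane timestep%2. */
#include <stdlib.h>
static double run_box2d3r_once(int compsize, int timestep)
{
    const int dimsize = compsize + 2 * BENCH_RAD;
    SB_TYPE *A1 = (SB_TYPE *)calloc((size_t)2 * dimsize * dimsize, sizeof(SB_TYPE));
    if (A1 == NULL)
        return -1.0;
    /* ...fill time plane 0 (A1[0 .. dimsize*dimsize-1]) with initial data... */
    const double elapsed = kernel_stencil(A1, compsize, timestep, false);
    free(A1);
    return elapsed;
}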
|
SoftMax.c
|
#include "../thnets.h"
THFloatTensor *nn_SoftMax_updateOutput(struct module *module, THFloatTensor *input)
{
THFloatTensor *output = module->output;
float *input_data, *output_data;
long nframe = 0, dim = 0, stride = 0;
long t;
if(input->nDimension == 1)
{
nframe = 1;
dim = input->size[0];
stride = 1;
}
else if(input->nDimension == 2)
{
nframe = input->size[0];
dim = input->size[1];
stride = 1;
}
else if(input->nDimension == 3)
{
nframe = 1;
dim = input->size[0];
stride = input->size[1]*input->size[2];
}
else if(input->nDimension == 4)
{
nframe = input->size[0];
dim = input->size[1];
stride = input->size[2]*input->size[3];
}
else
THError("1D, 2D, 3D or 4D tensor expected");
THFloatTensor_resizeAs(output, input);
input_data = THFloatTensor_data(input);
output_data = THFloatTensor_data(output);
#pragma omp parallel for private(t)
for(t = 0; t < stride*nframe; t++)
{
float *input_ptr = input_data + (t/stride)*dim*stride + t % stride;
float *output_ptr = output_data + (t/stride)*dim*stride + t % stride;
float inputMax = -THInf;
float sum;
long d;
for(d = 0; d < dim; d++) {
if (input_ptr[d*stride] >= inputMax) inputMax = input_ptr[d*stride];
}
sum = 0;
for(d = 0; d < dim; d++) {
float z = THExpMinusApprox(inputMax - input_ptr[d*stride]);
output_ptr[d*stride] = z;
sum += z;
}
for(d = 0; d < dim; d++) {
output_ptr[d*stride] *= 1/sum;
}
}
return output;
}
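/* The loop body above is the numerically stable softmax, applied independently
   to each (frame, spatial position) slice of length dim:
       y_d = exp(x_d - max_k x_k) / sum_k exp(x_k - max_k x_k)
   Subtracting the running maximum keeps every exponent <= 0 so exp cannot
   overflow; THExpMinusApprox(a) is thnets' fast approximation of exp(-a). */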
|
GB_unop__log_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log_fp32_fp32
// op(A') function: GB_unop_tran__log_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = logf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = logf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = logf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = logf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB025-simdtruedep-var-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has a race condition due to a true dependence.
The data races happen at the instruction (SIMD) level, not at the thread level.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len=100;
if (argc>1)
len = atoi(argv[1]);
int a[len], b[len];
#pragma omp parallel for private(i )
for (i=0;i<len;i++)
{
a[i]=i;
b[i]=i+1;
}
#pragma omp simd
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]*b[i];
for (i=0;i<len;i++) {
printf("%d %d\n", a[i], b[i]);
}
return 0;
}
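/* Why this is an instruction-level race: under #pragma omp simd the write to
   a[i+1] in one vector lane and the read of a[i] in the next lane land in the
   same vector iteration, so the loop-carried true dependence (distance 1) is
   violated by vectorization even though a single thread executes the loop. */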
|
sample_task_single_producer.c
|
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See LICENSE.txt in top-level directory.
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
int main(int argc, char *argv[]) {
int i, num = (argc > 1) ? atoi(argv[1]) : 100;
int nthreads;
struct timeval t_start, t_end;
double time;
double *a = (double *)malloc(sizeof(double) * num);
#pragma omp parallel
{ nthreads = omp_get_num_threads(); }
for (i = 0; i < num; i++) {
a[i] = i;
}
gettimeofday(&t_start, NULL);
#pragma omp parallel
{
#pragma omp single
{
for (i = 0; i < num; i++) {
#pragma omp task firstprivate(i)
{ a[i] *= 0.9; }
}
}
}
gettimeofday(&t_end, NULL);
time = (t_end.tv_sec * 1000000 + t_end.tv_usec) -
(t_start.tv_sec * 1000000 + t_start.tv_usec);
printf("%d %f\n", nthreads, time / 1000000.0);
for (i = 0; i < num; i++) {
if (a[i] != i * 0.9) {
printf("a[%d]=%f != %f\n", i, a[i], i * 0.9);
return 1;
}
}
    free(a);
    return 0;
}
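/* This is the classic single-producer tasking pattern: the thread that wins
   the omp single region creates all `num` tasks while the other threads in
   the team execute them; the implicit barrier at the end of the parallel
   region guarantees every task has finished before the timer stops. */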
|
resource_strings.h
|
#pragma once
#include <torch/csrc/jit/code_template.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cpu {
/* With type_as not checking the type of its input, a fusion group can have a
non-fp32 tensor as input. Correct code for this case is generated; however,
nvrtc does not know how to handle int*_t integer types, so these typedefs help
it handle those cases. */
static auto type_declarations_template = CodeTemplate(R"(
#define POS_INFINITY INFINITY
#define NEG_INFINITY -INFINITY
typedef ${IndexType} IndexType;
template<typename T, size_t N>
struct TensorInfo {
T* data;
IndexType sizes[N];
IndexType strides[N];
};
template<typename T>
struct TensorInfo<T, 0> {
T * data;
};
)");
static auto cpu_compilation_unit_template = CodeTemplate(R"(
#include <math.h>
#include <cstddef>
#include <cstdint>
template <typename scalar_t>
scalar_t rsqrtf(scalar_t x) {
return 1.0/sqrtf(x);
}
${type_declarations}
#define OMP_THRESHOLD 100000
static void ${kernelName}_kernel(IndexType totalElements, ${formals}) {
#pragma omp parallel for if(totalElements > OMP_THRESHOLD)
for (IndexType linearIndex = 0;
linearIndex < totalElements;
linearIndex += 1) {
// Convert `linearIndex` into an offset of tensor:
${tensorOffsets}
// calculate the results
${kernelBody}
}
}
extern "C"
void ${kernelName}(IndexType totalElements, void ** args) {
${kernelName}_kernel(totalElements ${,argument_loads});
}
)");
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
|
GB_binop__times_fc32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc32)
// A*D function (colscale): GB (_AxD__times_fc32)
// D*A function (rowscale): GB (_DxB__times_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc32)
// C=scalar+B GB (_bind1st__times_fc32)
// C=scalar+B' GB (_bind1st_tran__times_fc32)
// C=A+scalar GB (_bind2nd__times_fc32)
// C=A'+scalar GB (_bind2nd_tran__times_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_mul (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC32 || GxB_NO_TIMES_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC32_mul (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC32_mul (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
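// In scalar form, the two kernels above compute, for every entry p present in
// the bitmap (GBB returns true):
//     bind1st: Cx [p] = GB_FC32_mul (x, Bx [p])
//     bind2nd: Cx [p] = GB_FC32_mul (Ax [p], y)
// i.e. complex float multiplication with the scalar bound to the first or the
// second operand, respectively.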
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_mul (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__times_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_mul (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
volumeramdistancetransform.h
|
/*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2016-2020 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_VOLUMERAMDISTANCETRANSFORM_H
#define IVW_VOLUMERAMDISTANCETRANSFORM_H
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/common/inviwo.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/volume/volume.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#ifndef __clang__
#include <omp.h>
#include <thread>  // for std::thread::hardware_concurrency used below
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
 * of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11),
 * pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in grid index space
 * * Predicate is a function of type (const T &value) -> bool that decides whether a value
 *   in the input is a "feature".
 * * ValueTransform is a function of type (const U &squaredDist) -> U that is applied to all
 *   squared distance values at the end of the calculation.
 * * ProgressCallback is a function of type (double progress) -> void that is called with a
 *   value from 0 to 1 to indicate the progress of the calculation.
*/
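// A minimal usage sketch (hypothetical inRam/outRam volumes; the 0.5 threshold and
// sqrt transform are illustrative choices, mirroring the convenience overload below):
//
//   util::volumeRAMDistanceTransform(
//       inRam, outRam, basis, size3_t{1, 1, 1},
//       [](const float &v) { return v > 0.5f; },        // feature predicate
//       [](const float &d2) { return std::sqrt(d2); },  // squared dist -> dist
//       [](double p) { /* progress in [0, 1] */ });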
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis,
const size3_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
VolumeRAMPrecision<U> *outDistanceField,
const Matrix<3, U> basis, const size3_t upsample,
Predicate predicate, ValueTransform valueTransform,
ProgressCallback callback) {
#ifndef __clang__
omp_set_num_threads(std::thread::hardware_concurrency());
#endif
using int64 = glm::int64;
using i64vec3 = glm::tvec3<int64>;
auto square = [](auto a) { return a * a; };
callback(0.0);
const T *src = inVolume->getDataTyped();
U *dst = outDistanceField->getDataTyped();
const i64vec3 srcDim{inVolume->getDimensions()};
const i64vec3 dstDim{outDistanceField->getDimensions()};
const i64vec3 sm{upsample};
const auto squareBasis = glm::transpose(basis) * basis;
const Vector<3, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1], squareBasis[2][2]};
const Vector<3, U> squareVoxelSize{squareBasisDiag / Vector<3, U>{dstDim * dstDim}};
const Vector<3, U> invSquareVoxelSize{Vector<3, U>{1.0f} / squareVoxelSize};
{
const auto maxdist = glm::compMax(squareBasisDiag);
bool orthogonal = true;
for (size_t i = 0; i < squareBasis.length(); i++) {
for (size_t j = 0; j < squareBasis.length(); j++) {
if (i != j) {
if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
orthogonal = false;
break;
}
}
}
}
if (!orthogonal) {
LogWarnCustom(
"volumeRAMDistanceTransform",
"Calculating the distance transform on a non-orthogonal volume will not give "
"correct values");
}
}
if (srcDim * sm != dstDim) {
throw Exception(
"DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
" dst = " + toString(dstDim) + " scaling = " + toString(sm),
IVW_CONTEXT_CUSTOM("volumeRAMDistanceTransform"));
}
util::IndexMapper<3, int64> srcInd(srcDim);
util::IndexMapper<3, int64> dstInd(dstDim);
auto is_feature = [&](const int64 x, const int64 y, const int64 z) {
return predicate(src[srcInd(x / sm.x, y / sm.y, z / sm.z)]);
};
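    // Integer division maps each (possibly upsampled) output voxel back to its
    // source voxel; e.g. with sm = {2, 2, 2}, output voxels (4, 5, 6) and
    // (5, 5, 7) both sample src at (2, 2, 3).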
// first pass, forward and backward scan along x
// result: min distance in x direction
#pragma omp parallel for
for (int64 z = 0; z < dstDim.z; ++z) {
for (int64 y = 0; y < dstDim.y; ++y) {
// forward
U dist = static_cast<U>(dstDim.x);
for (int64 x = 0; x < dstDim.x; ++x) {
if (!is_feature(x, y, z)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y, z)] = squareVoxelSize.x * square(dist);
}
// backward
dist = static_cast<U>(dstDim.x);
for (int64 x = dstDim.x - 1; x >= 0; --x) {
if (!is_feature(x, y, z)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y, z)] =
std::min<U>(dst[dstInd(x, y, z)], squareVoxelSize.x * square(dist));
}
}
}
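    // After the two scans each x-row holds the squared distance (scaled by
    // squareVoxelSize.x) to the nearest feature along x; e.g. a row of length 4
    // with features at x = 0 and x = 3 ends up as {0, 1, 1, 0} * squareVoxelSize.x.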
// second pass, scan y direction
// for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
// result: min distance in x and y direction
callback(0.3);
#pragma omp parallel
{
std::vector<U> buff;
buff.resize(dstDim.y);
#pragma omp for
for (int64 z = 0; z < dstDim.z; ++z) {
for (int64 x = 0; x < dstDim.x; ++x) {
// cache column data into temporary buffer
for (int64 y = 0; y < dstDim.y; ++y) {
buff[y] = dst[dstInd(x, y, z)];
}
for (int64 y = 0; y < dstDim.y; ++y) {
auto d = buff[y];
if (d != U(0)) {
const auto rMax =
static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
const auto rStart = std::min(rMax, y - 1);
const auto rEnd = std::min(rMax, dstDim.y - y);
for (int64 n = -rStart; n < rEnd; ++n) {
const auto w = buff[y + n] + squareVoxelSize.y * square(n);
if (w < d) d = w;
}
}
dst[dstInd(x, y, z)] = d;
}
}
}
}
// third pass, scan z direction
// for each voxel v(x,y,z) find min_i(data(x,y,i) + (z - i)^2), 0 <= i < dimZ
    // result: min distance in x, y, and z directions
callback(0.6);
#pragma omp parallel
{
std::vector<U> buff;
buff.resize(dstDim.z);
#pragma omp for
for (int64 y = 0; y < dstDim.y; ++y) {
for (int64 x = 0; x < dstDim.x; ++x) {
// cache column data into temporary buffer
for (int64 z = 0; z < dstDim.z; ++z) {
buff[z] = dst[dstInd(x, y, z)];
}
for (int64 z = 0; z < dstDim.z; ++z) {
auto d = buff[z];
if (d != U(0)) {
const auto rMax =
static_cast<int64>(std::sqrt(d * invSquareVoxelSize.z)) + 1;
const auto rStart = std::min(rMax, z - 1);
const auto rEnd = std::min(rMax, dstDim.z - z);
for (int64 n = -rStart; n < rEnd; ++n) {
const auto w = buff[z + n] + squareVoxelSize.z * square(n);
if (w < d) d = w;
}
}
dst[dstInd(x, y, z)] = d;
}
}
}
}
// scale data
callback(0.9);
const int64 volSize = dstDim.x * dstDim.y * dstDim.z;
#pragma omp parallel for
for (int64 i = 0; i < volSize; ++i) {
dst[i] = valueTransform(dst[i]);
}
callback(1.0);
}
template <typename T, typename U>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
VolumeRAMPrecision<U> *outDistanceField,
const Matrix<3, U> basis, const size3_t upsample) {
util::volumeRAMDistanceTransform(
inVolume, outDistanceField, basis, upsample,
[](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; },
[](const U &squareDist) {
return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
},
        [](double) {});
}
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback) {
const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample,
predicate, valueTransform, callback);
});
}
template <typename U, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale,
ProgressCallback progress) {
const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
using ValueType = util::PrecisionValueType<decltype(vrprecision)>;
const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; };
const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; };
const auto normPredicateIn = [threshold](const ValueType &val) {
return util::glm_convert_normalized<double>(val) < threshold;
};
const auto normPredicateOut = [threshold](const ValueType &val) {
return util::glm_convert_normalized<double>(val) > threshold;
};
const auto valTransIdent = [scale](const float &squareDist) {
return static_cast<float>(scale * squareDist);
};
const auto valTransSqrt = [scale](const float &squareDist) {
return static_cast<float>(scale * std::sqrt(squareDist));
};
if (normalize && square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateIn, valTransIdent, progress);
} else if (normalize && square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateOut, valTransIdent, progress);
} else if (normalize && !square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateIn, valTransSqrt, progress);
} else if (normalize && !square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateOut, valTransSqrt, progress);
} else if (!normalize && square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateIn, valTransIdent, progress);
} else if (!normalize && square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateOut, valTransIdent, progress);
} else if (!normalize && !square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateIn, valTransSqrt, progress);
} else if (!normalize && !square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateOut, valTransSqrt, progress);
}
});
}
template <typename U>
void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale) {
util::volumeDistanceTransform(inVolume, outDistanceField, upsample, threshold, normalize, flip,
square, scale, [](double) {});
}
} // namespace inviwo
#endif // IVW_VOLUMERAMDISTANCETRANSFORM_H
|
symv_x_bsr_n_hi.c
|
#include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/opt.h"
#include <string.h>
#include "stdio.h"
#include "alphasparse/util.h"
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_BSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT thread_num = alpha_get_thread_num();
const ALPHA_INT m = A->rows * A->block_size;
const ALPHA_INT n = A->cols * A->block_size;
const ALPHA_INT bs = A->block_size;
const ALPHA_INT bs2 = bs * bs;
ALPHA_INT b_rows = A->rows;
ALPHA_INT b_cols = A->cols;
if (b_rows != b_cols)
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);
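    // partition[] holds nnz-balanced block-row ranges, one per thread; e.g. for
    // b_rows = 8 and thread_num = 4 it might be {0, 2, 4, 6, 8}, so thread t
    // owns block rows [partition[t], partition[t + 1]).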
ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_m_s = partition[tid];
const ALPHA_INT local_m_e = partition[tid + 1];
tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
{
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
if (bc == br)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
                        // diagonal entry A(row + b_row, col + b_row)
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * (bs + 1)], x[col + b_row]);
for (ALPHA_INT b_col = b_row + 1; b_col < bs; b_col++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
}
}
}
else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
{
        for (ALPHA_INT br = local_m_s; br < local_m_e; br++) // iterate only this thread's partition, as in the row-major branch
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
alpha_madde(tmp[tid][b_col + row], A->values[a0_idx + b_col * (bs + 1)], x[b_col + col]);
for (ALPHA_INT b_row = 0; b_row < b_col; b_row++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
}
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
{
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for (ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(tmp_y, tmp_y, tmp[j][i]);
}
alpha_mul(y[i], y[i], beta);
alpha_madde(y[i], tmp_y, alpha);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
free(tmp[i]);
}
free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
rawSHA256_fmt_plug.c
|
/*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Understands hex hashes as well as Cisco "type 4" base64.
*
* Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
* No copyright is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2011 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256);
#else
#include "arch.h"
#include "sha2.h"
#include "stdint.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA256"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
/* Note: Cisco hashes are truncated at length 25. We currently ignore this. */
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#else
#define PLAINTEXT_LENGTH 125
#endif
#define _RAWSHA256_H
#include "rawSHA256_common.h"
#undef _RAWSHA256_H
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
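// Keys are stored interleaved for SIMD: within each group of SIMD_COEF_32 keys,
// word w of key k lives at lane (k & (SIMD_COEF_32-1)) of word slot w, with bytes
// swapped inside each 32-bit word. E.g. with SIMD_COEF_32 = 4 (first group),
// GETPOS(5, 1) = 1*4 + 4*4 + (3 - 1) = 22.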
static uint32_t (*saved_key);
static uint32_t (*crypt_out);
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
[(DIGEST_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
#endif
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
#else
saved_key = mem_calloc_align(self->params.max_keys_per_crypt * SHA_BUF_SIZ,
sizeof(*saved_key),
MEM_ALIGN_SIMD);
crypt_out = mem_calloc_align(self->params.max_keys_per_crypt * 8,
sizeof(*crypt_out),
MEM_ALIGN_SIMD);
#endif
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
MEM_FREE(saved_len);
#endif
}
static void *get_binary(char *ciphertext)
{
static unsigned int *outw;
unsigned char *out;
char *p;
int i;
if (!outw)
outw = mem_calloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD);
out = (unsigned char*)outw;
p = ciphertext + HEX_TAG_LEN;
for (i = 0; i < DIGEST_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
#ifdef SIMD_COEF_32
alter_endianity (out, DIGEST_SIZE);
#ifdef REVERSE_STEPS
sha256_reverse(outw);
#endif
#endif
return out;
}
#ifdef SIMD_COEF_32
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32)
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
static int binary_hash_0(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_6; }
#ifdef SIMD_COEF_32
static void set_key(char *key, int index) {
#if ARCH_ALLOWS_UNALIGNED
const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
#else
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
const ARCH_WORD_32 *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
key : strcpy(buf_aligned, key));
#endif
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32 *)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP(temp | (0x80U << 24));
len+=3;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP(temp);
len += 4;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80000000;
key_cleaning:
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
keybuffer[15*SIMD_COEF_32] = len << 3;
}
#else
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef SIMD_COEF_32
static char *get_key(int index) {
unsigned int i,s;
static char out[PLAINTEXT_LENGTH+1];
unsigned char *wucp = (unsigned char*)saved_key;
s = ((ARCH_WORD_32 *)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3;
for(i=0;i<s;i++)
out[i] = wucp[ GETPOS(i, index) ];
out[i] = 0;
return (char*) out;
}
#else
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
#endif
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
SIMDSHA256body(&saved_key[(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32],
&crypt_out[(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32],
NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], saved_len[index]);
SHA256_Final((unsigned char *)crypt_out[index], &ctx);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
unsigned int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
if (((ARCH_WORD_32*) binary)[0] == crypt_out[HASH_IDX])
#else
if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] )
#endif
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
return ((ARCH_WORD_32*)binary)[0] == crypt_out[HASH_IDX];
#else
return *(ARCH_WORD_32*)binary == crypt_out[index][0];
#endif
}
static int cmp_exact(char *source, int index)
{
ARCH_WORD_32 *binary = get_binary(source);
char *key = get_key(index);
SHA256_CTX ctx;
ARCH_WORD_32 crypt_out[DIGEST_SIZE / sizeof(ARCH_WORD_32)];
SHA256_Init(&ctx);
SHA256_Update(&ctx, key, strlen(key));
SHA256_Final((unsigned char*)crypt_out, &ctx);
#ifdef SIMD_COEF_32
alter_endianity(crypt_out, DIGEST_SIZE);
#ifdef REVERSE_STEPS
sha256_reverse(crypt_out);
#endif
#endif
return !memcmp(binary, crypt_out, DIGEST_SIZE);
}
struct fmt_main fmt_rawSHA256 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA256 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
sha256_common_tests
}, {
init,
done,
fmt_default_reset,
sha256_common_prepare,
sha256_common_valid,
sha256_common_split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
color_transforms.h
|
/****************************************************************************
**
** Copyright (C) 2017 TU Wien, ACIN, Vision 4 Robotics (V4R) group
** Contact: v4r.acin.tuwien.ac.at
**
** This file is part of V4R
**
** V4R is distributed under dual licenses - GPLv3 or closed source.
**
** GNU General Public License Usage
** V4R is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published
** by the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** V4R is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** Please review the following information to ensure the GNU General Public
** License requirements will be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
**
** Commercial License Usage
** If GPL is not suitable for your project, you must purchase a commercial
** license to use V4R. Licensees holding valid commercial V4R licenses may
** use this file in accordance with the commercial license agreement
** provided with the Software or, alternatively, in accordance with the
** terms contained in a written agreement between you and TU Wien, ACIN, V4R.
** For licensing terms and conditions please contact office<at>acin.tuwien.ac.at.
**
**
** The copyright holder additionally grants the author(s) of the file the right
** to use, copy, modify, merge, publish, distribute, sublicense, and/or
** sell copies of their contributions without any restrictions.
**
****************************************************************************/
/**
* @file color_transforms.h
* @author Aitor Aldoma ([email protected])
* @date 2013
* @brief
*
*/
#pragma once
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <v4r/core/macros.h>
#include <vector>
#include <omp.h>
namespace v4r {
class V4R_EXPORTS ColorTransform {
public:
typedef std::shared_ptr<ColorTransform> Ptr;
virtual ~ColorTransform() {}
virtual Eigen::VectorXf do_conversion(unsigned char R, unsigned char G, unsigned char B) const = 0;
virtual void do_inverse_conversion(const Eigen::VectorXf &converted_color, unsigned char &R, unsigned char &G,
unsigned char &B) const {
(void)converted_color;
(void)R;
(void)G;
(void)B;
std::cerr << "Inverse conversion is not implemented!" << std::endl;
}
virtual size_t getOutputNumColorCompenents() const = 0;
template <typename PointT>
V4R_EXPORTS void convert(const pcl::PointCloud<PointT> &cloud, Eigen::MatrixXf &converted_color) const {
converted_color = Eigen::MatrixXf(cloud.points.size(), getOutputNumColorCompenents());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
const PointT &p = cloud.points[i];
unsigned char r = (unsigned char)p.r;
unsigned char g = (unsigned char)p.g;
unsigned char b = (unsigned char)p.b;
converted_color.row(i) = do_conversion(r, g, b);
}
}
};
class V4R_EXPORTS RGB2GrayScale : public ColorTransform {
public:
typedef std::shared_ptr<RGB2GrayScale> Ptr;
size_t getOutputNumColorCompenents() const {
return 1;
}
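  // Luminance from RGB using the Rec. 709 (BT.709) luma coefficients.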
Eigen::VectorXf do_conversion(unsigned char R, unsigned char G, unsigned char B) const {
Eigen::VectorXf c(1);
c(0) = 0.2126f * R / 255.f + 0.7152f * G / 255.f + 0.0722f * B / 255.f;
return c;
}
};
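// Usage sketch (the `cloud` point cloud is a placeholder, not part of this header):
//
//   v4r::RGB2GrayScale gray;
//   Eigen::MatrixXf converted;
//   gray.convert(cloud, converted);  // one row per point, one grayscale column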
} // namespace v4r
|
compute_bk.h
|
#ifndef _COMPUTE_BK_AMA_H
#define _COMPUTE_BK_AMA_H
CPS_START_NAMESPACE
//We compute B_K for a given first kaon timeslice (t0) and a fixed K->K separation for each operator insertion time. The sink kaon timeslice is t1 = (t0 + tsep) % Lt
//The matrix is indexed as [t0][(top-t0+Lt)%Lt]
//Source momenta of the strange quark props are needed for the flavor projection.
//It is assumed that the total kaon momentum is zero, and we project onto zero momentum at the operator insertion
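//For example, with Lt = 64, t0 = 10 and tsep = 32 the sink sits at t1 = 42, and an
//operator insertion at top = 8 contributes to into(10, (8 - 10 + 64) % 64) = into(10, 62)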
void GparityBK(fMatrix<Rcomplex> &into, const int t0, const int t1,
const PropSiteMatrixGetter &prop_h_t0, const PropSiteMatrixGetter &prop_l_t0, const ThreeMomentum &p_psi_h_t0,
const PropSiteMatrixGetter &prop_h_t1, const PropSiteMatrixGetter &prop_l_t1, const ThreeMomentum &p_psi_h_t1,
const bool do_flav_project = true
){
const int Lt = GJP.TnodeSites()*GJP.Tnodes();
const int nthread = omp_get_max_threads();
basicComplexArray<Rcomplex> tmp(Lt,nthread); //defaults to zero for all elements
FlavorMatrix kaon_proj_t0 = getProjector(p_psi_h_t0);
FlavorMatrix kaon_proj_t1 = getProjector(p_psi_h_t1);
int vol3d = GJP.VolNodeSites()/GJP.TnodeSites();
#pragma omp parallel for
for(int x=0;x<GJP.VolNodeSites();x++){
int pos[4];
int rem = x;
for(int i=0;i<4;i++){ pos[i] = rem % GJP.NodeSites(i); rem /= GJP.NodeSites(i); }
int x3d_lcl = x % vol3d;
int t_glb = pos[3] + GJP.TnodeCoor() * GJP.TnodeSites(); //operator insertion time
int tdis0_glb = t_glb - t0; //linear time coordinate
int tdis1_glb = t_glb - t1;
int tdis_into = (tdis0_glb + Lt)% Lt; //output time coordinate modulo Lt
SpinColorFlavorMatrix prop_l_t0_site;
prop_l_t0.siteMatrix(prop_l_t0_site,x3d_lcl,tdis0_glb);
if(do_flav_project) prop_l_t0_site *= kaon_proj_t0;
SpinColorFlavorMatrix prop_h_dag_t0_site;
prop_h_t0.siteMatrix(prop_h_dag_t0_site,x3d_lcl,tdis0_glb);
prop_h_dag_t0_site.hconj();
SpinColorFlavorMatrix prop_prod_t0 = prop_l_t0_site * prop_h_dag_t0_site;
SpinColorFlavorMatrix prop_l_t1_site;
prop_l_t1.siteMatrix(prop_l_t1_site,x3d_lcl,tdis1_glb);
if(do_flav_project) prop_l_t1_site *= kaon_proj_t1;
SpinColorFlavorMatrix prop_h_dag_t1_site;
prop_h_t1.siteMatrix(prop_h_dag_t1_site,x3d_lcl,tdis1_glb);
prop_h_dag_t1_site.hconj();
SpinColorFlavorMatrix prop_prod_t1 = prop_l_t1_site * prop_h_dag_t1_site;
for(int mu=0;mu<4;mu++){
for(int Gamma = 0; Gamma < 2; Gamma++){ //\gamma^\mu and \gamma^\mu\gamma^5
SpinColorFlavorMatrix part1 = prop_prod_t0;
if(Gamma == 1) part1.gl(-5);
part1.gl(mu);
part1.pr(F0);
SpinColorFlavorMatrix part2 = prop_prod_t1;
if(Gamma == 1) part2.gl(-5);
part2.gl(mu);
part2.pr(F0);
tmp(tdis_into, omp_get_thread_num()) += 2.0*Trace(part1)*Trace(part2);
tmp(tdis_into, omp_get_thread_num()) += -2.0*Trace(part1, part2);
}
}
}
tmp.threadSum();
tmp.nodeSum();
for(int tdis=0;tdis<Lt;tdis++)
into(t0, tdis) = tmp[tdis];
}
void StandardBK(fMatrix<Rcomplex> &into, const int t0, const int t1,
const PropSiteMatrixGetter &prop_h_t0, const PropSiteMatrixGetter &prop_l_t0,
const PropSiteMatrixGetter &prop_h_t1, const PropSiteMatrixGetter &prop_l_t1){
const int Lt = GJP.TnodeSites()*GJP.Tnodes();
const int nthread = omp_get_max_threads();
basicComplexArray<Rcomplex> tmp(Lt,nthread); //defaults to zero for all elements
int vol3d = GJP.VolNodeSites()/GJP.TnodeSites();
#pragma omp parallel for
for(int x=0;x<GJP.VolNodeSites();x++){
int pos[4];
int rem = x;
for(int i=0;i<4;i++){ pos[i] = rem % GJP.NodeSites(i); rem /= GJP.NodeSites(i); }
int x3d_lcl = x % vol3d;
int t_glb = pos[3] + GJP.TnodeCoor() * GJP.TnodeSites(); //operator insertion time
int tdis0_glb = t_glb - t0; //linear time coordinate
int tdis1_glb = t_glb - t1;
int tdis_into = (tdis0_glb +Lt)% Lt; //output time coordinate modulo Lt
WilsonMatrix prop_l_t0_site;
prop_l_t0.siteMatrix(prop_l_t0_site,x3d_lcl,tdis0_glb);
WilsonMatrix prop_h_dag_t0_site;
prop_h_t0.siteMatrix(prop_h_dag_t0_site,x3d_lcl,tdis0_glb);
prop_h_dag_t0_site.hconj();
WilsonMatrix prop_prod_t0 = prop_l_t0_site * prop_h_dag_t0_site;
WilsonMatrix prop_l_t1_site;
prop_l_t1.siteMatrix(prop_l_t1_site,x3d_lcl,tdis1_glb);
WilsonMatrix prop_h_dag_t1_site;
prop_h_t1.siteMatrix(prop_h_dag_t1_site,x3d_lcl,tdis1_glb);
prop_h_dag_t1_site.hconj();
WilsonMatrix prop_prod_t1 = prop_l_t1_site * prop_h_dag_t1_site;
for(int mu=0;mu<4;mu++){
for(int Gamma = 0; Gamma < 2; Gamma++){ //\gamma^\mu and \gamma^\mu\gamma^5
WilsonMatrix part1 = prop_prod_t0;
if(Gamma == 1) part1.gl(-5);
part1.gl(mu);
WilsonMatrix part2 = prop_prod_t1;
if(Gamma == 1) part2.gl(-5);
part2.gl(mu);
tmp(tdis_into, omp_get_thread_num()) += 2.0*Trace(part1)*Trace(part2);
tmp(tdis_into, omp_get_thread_num()) += -2.0*Trace(part1, part2);
}
}
}
tmp.threadSum();
tmp.nodeSum();
for(int tdis=0;tdis<Lt;tdis++)
into(t0, tdis) = tmp[tdis];
}
inline void getBKsnkPropBcAndWrapperTsnk(TbcStatus &time_bc_t1, int &t1, const TbcStatus &time_bc_t0, const int t0, const int tsep){
const int Lt = GJP.Tnodes()*GJP.TnodeSites();
time_bc_t1 = time_bc_t0;
t1 = t0 + tsep;
if(t1 >= Lt){
if(time_bc_t0.isCombinedType()){ //Use F(t+Lt) = B(t) and B(t+Lt) = F(t)
time_bc_t1.swapTbcCombination();
t1 -= Lt;
}else if(time_bc_t0.getSingleBc() == BND_CND_PRD){
t1 -= Lt;
}else if(time_bc_t0.getSingleBc() == BND_CND_APRD){
ERR.General("","getBKsnkPropBcAndWrapperTsnk","- sign from tsnk prop crossing boundary not implemented yet\n"); //G(t-Lt) = -G(t), need to pass the minus sign into the function
}
}
assert(t1>=0 && t1<Lt);
}
CPS_END_NAMESPACE
#endif
|
kmp_csupport.c
|
/*
* kmp_csupport.c -- kfront linkage support for OpenMP.
*/
/* <copyright>
Copyright (c) 1997-2016 Intel Corporation. All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</copyright> */
#include "omp.h" /* extern "C" declarations of user-visible routines */
#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_error.h"
#include "kmp_stats.h"
#if OMPT_SUPPORT
#include "ompt-internal.h"
#include "ompt-specific.h"
#endif
#define MAX_MESSAGE 512
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
/* flags will be used in future, e.g., to implement */
/* openmp_strict library restrictions */
/*!
* @ingroup STARTUP_SHUTDOWN
* @param loc in source location information
* @param flags in for future use (currently ignored)
*
* Initialize the runtime library. This call is optional; if it is not made then
* it will be implicitly called by attempts to use other library functions.
*
*/
void
__kmpc_begin(ident_t *loc, kmp_int32 flags)
{
// By default __kmp_ignore_mppbeg() returns TRUE.
if (__kmp_ignore_mppbeg() == FALSE) {
__kmp_internal_begin();
KC_TRACE( 10, ("__kmpc_begin: called\n" ) );
}
}
/*!
* @ingroup STARTUP_SHUTDOWN
* @param loc source location information
*
* Shutdown the runtime library. This is also optional, and even if called will not
* do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero.
*/
void
__kmpc_end(ident_t *loc)
{
// By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end() call no-op.
// However, this can be overridden with KMP_IGNORE_MPPEND environment variable.
// If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end()
// will unregister this root (it can cause library shut down).
if (__kmp_ignore_mppend() == FALSE) {
KC_TRACE( 10, ("__kmpc_end: called\n" ) );
KA_TRACE( 30, ("__kmpc_end\n" ));
__kmp_internal_end_thread( -1 );
}
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.
This function can be called in any context.
If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that
which would be returned by omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).
If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannote be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32
__kmpc_global_thread_num(ident_t *loc)
{
kmp_int32 gtid = __kmp_entry_gtid();
KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) );
return gtid;
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime
This function can be called in any context.
It returns the total number of threads under the control of the OpenMP runtime. That is
not a number that can be determined by any OpenMP standard calls, since the library may be
called from more than one non-OpenMP thread, and this reflects the total over all such calls.
Similarly, since the runtime maintains underlying threads even when they are not active
(because the cost of creating and destroying OS threads is high), this call counts all such
threads even if they are not currently waiting for work.
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );
return TCR_4(__kmp_nth);
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );
return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}
/*!
* @ingroup DEPRECATED
* @param loc location description
*
* This function need not be called. It always returns TRUE.
*/
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG
return TRUE;
#else
const char *semi2;
const char *semi3;
int line_no;
if (__kmp_par_range == 0) {
return TRUE;
}
semi2 = loc->psource;
if (semi2 == NULL) {
return TRUE;
}
semi2 = strchr(semi2, ';');
if (semi2 == NULL) {
return TRUE;
}
semi2 = strchr(semi2 + 1, ';');
if (semi2 == NULL) {
return TRUE;
}
if (__kmp_par_range_filename[0]) {
const char *name = semi2 - 1;
while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
name--;
}
if ((*name == '/') || (*name == ';')) {
name++;
}
if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
return __kmp_par_range < 0;
}
}
semi3 = strchr(semi2 + 1, ';');
if (__kmp_par_range_routine[0]) {
if ((semi3 != NULL) && (semi3 > semi2)
&& (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
return __kmp_par_range < 0;
}
}
if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
return __kmp_par_range > 0;
}
return __kmp_par_range < 0;
}
return TRUE;
#endif /* KMP_DEBUG */
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
return __kmp_entry_thread() -> th.th_root -> r.r_active;
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct
Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
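/*
 * For illustration (typical lowering shape, not verbatim compiler output):
 *     #pragma omp parallel num_threads(4)
 * is usually compiled to
 *     __kmpc_push_num_threads(&loc, __kmpc_global_thread_num(&loc), 4);
 * immediately before the matching __kmpc_fork_call().
 */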
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
global_tid, num_threads ) );
__kmp_push_num_threads( loc, global_tid, num_threads );
}
void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );
/* the num_threads are automatically popped */
}
#if OMP_40_ENABLED
void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
global_tid, proc_bind ) );
__kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}
#endif /* OMP_40_ENABLED */
/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined parallel construct
@param ... pointers to shared variables that aren't global
Do the actual fork and call the microtask in the relevant number of threads.
*/
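/*
 * For illustration (typical lowering shape, not verbatim compiler output):
 *     #pragma omp parallel
 *     { body(a, b); }
 * becomes an outlined microtask plus a call such as
 *     __kmpc_fork_call(&loc, 2, (kmpc_micro)body_outlined, &a, &b);
 * where the runtime supplies the microtask's leading gtid/tid arguments.
 */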
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
int gtid = __kmp_entry_gtid();
#if (KMP_STATS_ENABLED)
int inParallel = __kmpc_in_parallel(loc);
if (inParallel)
{
KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL);
}
else
{
KMP_STOP_EXPLICIT_TIMER(OMP_serial);
KMP_COUNT_BLOCK(OMP_PARALLEL);
}
#endif
// maybe to save thr_state is enough here
{
va_list ap;
va_start( ap, microtask );
#if OMPT_SUPPORT
int tid = __kmp_tid_from_gtid( gtid );
kmp_info_t *master_th = __kmp_threads[ gtid ];
kmp_team_t *parent_team = master_th->th.th_team;
if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
}
#endif
#if INCLUDE_SSC_MARKS
SSC_MARK_FORKING();
#endif
__kmp_fork_call( loc, gtid, fork_context_intel,
argc,
#if OMPT_SUPPORT
VOLATILE_CAST(void *) microtask, // "unwrapped" task
#endif
VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
VOLATILE_CAST(launch_t) __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
&ap
#else
ap
#endif
);
#if INCLUDE_SSC_MARKS
SSC_MARK_JOINING();
#endif
__kmp_join_call( loc, gtid
#if OMPT_SUPPORT
, fork_context_intel
#endif
);
va_end( ap );
#if OMPT_SUPPORT
if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = 0;
}
#endif
}
#if (KMP_STATS_ENABLED)
if (!inParallel)
KMP_START_EXPLICIT_TIMER(OMP_serial);
#endif
}
#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct
Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
global_tid, num_teams, num_threads ) );
__kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}
/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined teams construct
@param ... pointers to shared variables that aren't global
Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
int gtid = __kmp_entry_gtid();
kmp_info_t *this_thr = __kmp_threads[ gtid ];
va_list ap;
va_start( ap, microtask );
KMP_COUNT_BLOCK(OMP_TEAMS);
// remember teams entry point and nesting level
this_thr->th.th_teams_microtask = microtask;
this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host
#if OMPT_SUPPORT
kmp_team_t *parent_team = this_thr->th.th_team;
int tid = __kmp_tid_from_gtid( gtid );
if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
}
#endif
// check if __kmpc_push_num_teams called, set default number of teams otherwise
if ( this_thr->th.th_teams_size.nteams == 0 ) {
__kmp_push_num_teams( loc, gtid, 0, 0 );
}
KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);
__kmp_fork_call( loc, gtid, fork_context_intel,
argc,
#if OMPT_SUPPORT
VOLATILE_CAST(void *) microtask, // "unwrapped" task
#endif
VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task
VOLATILE_CAST(launch_t) __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
&ap
#else
ap
#endif
);
__kmp_join_call( loc, gtid
#if OMPT_SUPPORT
, fork_context_intel
#endif
);
#if OMPT_SUPPORT
if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = NULL;
}
#endif
this_thr->th.th_teams_microtask = NULL;
this_thr->th.th_teams_level = 0;
*(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
va_end( ap );
}
#endif /* OMP_40_ENABLED */
//
// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied. I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
//
int
__kmpc_invoke_task_func( int gtid )
{
return __kmp_invoke_task_func( gtid );
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void
__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
__kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with
* kmp_fork_call since the tasks to be done are similar in each case.
*/
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
Leave a serialized parallel construct.
*/
void
__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
kmp_internal_control_t *top;
kmp_info_t *this_thr;
kmp_team_t *serial_team;
KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );
/* skip all this code for autopar serialized loops since it results in
unacceptable overhead */
if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) )
return;
// Not autopar code
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
this_thr = __kmp_threads[ global_tid ];
serial_team = this_thr->th.th_serial_team;
#if OMP_45_ENABLED
kmp_task_team_t * task_team = this_thr->th.th_task_team;
// we need to wait for the proxy tasks before finishing the thread
if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks )
__kmp_task_team_wait(this_thr, serial_team, NULL ); // is an ITT object needed here?
#endif
KMP_MB();
KMP_DEBUG_ASSERT( serial_team );
KMP_ASSERT( serial_team -> t.t_serialized );
KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );
/* If necessary, pop the internal control stack values and replace the team values */
top = serial_team -> t.t_control_stack_top;
if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
serial_team -> t.t_control_stack_top = top -> next;
__kmp_free(top);
}
//if( serial_team -> t.t_serialized > 1 )
serial_team -> t.t_level--;
/* pop dispatch buffers stack */
KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
{
dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
serial_team->t.t_dispatch->th_disp_buffer =
serial_team->t.t_dispatch->th_disp_buffer->next;
__kmp_free( disp_buffer );
}
-- serial_team -> t.t_serialized;
if ( serial_team -> t.t_serialized == 0 ) {
/* return to the parallel section */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) {
__kmp_clear_x87_fpu_status_word();
__kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
__kmp_load_mxcsr( &serial_team->t.t_mxcsr );
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
this_thr -> th.th_team = serial_team -> t.t_parent;
this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid;
/* restore values cached in the thread */
this_thr -> th.th_team_nproc = serial_team -> t.t_parent -> t.t_nproc; /* JPH */
this_thr -> th.th_team_master = serial_team -> t.t_parent -> t.t_threads[0]; /* JPH */
this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;
/* TODO the below shouldn't need to be adjusted for serialized teams */
this_thr -> th.th_dispatch = & this_thr -> th.th_team ->
t.t_dispatch[ serial_team -> t.t_master_tid ];
__kmp_pop_current_task_from_thread( this_thr );
KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
this_thr -> th.th_current_task -> td_flags.executing = 1;
if ( __kmp_tasking_mode != tskm_immediate_exec ) {
// Copy the task team from the new child / old parent team to the thread.
this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
KA_TRACE(20, ("__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
}
} else {
if ( __kmp_tasking_mode != tskm_immediate_exec ) {
KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
global_tid, serial_team, serial_team -> t.t_serialized ) );
}
}
if ( __kmp_env_consistency_check )
__kmp_pop_parallel( global_tid, NULL );
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information.
Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
void
__kmpc_flush(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_flush: called\n" ) );
/* need explicit __mf() here since use volatile instead in library */
KMP_MB(); /* Flush all pending memory write invalidates. */
#if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#else
// C74404
// This is to address non-temporal store instructions (sfence needed).
        // The clflush instruction is covered as well (mfence needed).
        // Probably the non-temporal load movntdqa instruction should also be addressed.
// mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2.
if ( ! __kmp_cpuinfo.initialized ) {
__kmp_query_cpuid( & __kmp_cpuinfo );
}; // if
if ( ! __kmp_cpuinfo.sse2 ) {
// CPU cannot execute SSE2 instructions.
} else {
#if KMP_COMPILER_ICC || KMP_COMPILER_MSVC
_mm_mfence();
#else
__sync_synchronize();
#endif // KMP_COMPILER_ICC
}; // if
#endif // KMP_MIC
#elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// Nothing to see here move along
#elif KMP_ARCH_PPC64
// Nothing needed here (we have a real MB above).
#if KMP_OS_CNK
// The flushing thread needs to yield here; this prevents a
// busy-waiting thread from saturating the pipeline. flush is
// often used in loops like this:
// while (!flag) {
// #pragma omp flush(flag)
// }
// and adding the yield here is good for at least a 10x speedup
// when running >2 threads per core (on the NAS LU benchmark).
__kmp_yield(TRUE);
#endif
#else
#error Unknown or unsupported architecture
#endif
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
Execute a barrier.
*/
void
__kmpc_barrier(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_BARRIER);
KMP_TIME_BLOCK(OMP_barrier);
KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
}; // if
__kmp_check_barrier( global_tid, ct_barrier, loc );
}
__kmp_threads[ global_tid ]->th.th_ident = loc;
// TODO: explicit barrier_wait_id:
// this function is called when 'barrier' directive is present or
// implicit barrier at the end of a worksharing construct.
// 1) better to add a per-thread barrier counter to a thread data structure
// 2) set to 0 when a new team is created
    // 3) no sync is required
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
}
/* The BARRIER for a MASTER section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number .
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
kmp_int32
__kmpc_master(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_MASTER);
int status = 0;
KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
if( KMP_MASTER_GTID( global_tid )) {
KMP_START_EXPLICIT_TIMER(OMP_master);
status = 1;
}
#if OMPT_SUPPORT && OMPT_TRACE
if (status) {
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
ompt_callbacks.ompt_callback(ompt_event_master_begin)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
}
#endif
if ( __kmp_env_consistency_check ) {
#if KMP_USE_DYNAMIC_LOCK
if (status)
__kmp_push_sync( global_tid, ct_master, loc, NULL, 0 );
else
__kmp_check_sync( global_tid, ct_master, loc, NULL, 0 );
#else
if (status)
__kmp_push_sync( global_tid, ct_master, loc, NULL );
else
__kmp_check_sync( global_tid, ct_master, loc, NULL );
#endif
}
return status;
}
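/*
  Illustrative lowering sketch (hedged; exact code generation is
  compiler-dependent): a master construct typically becomes a guarded call
  pair around the body, using the return value documented above.
  @code
  #include <omp.h>
  #include <stdio.h>

  int main(void) {
      #pragma omp parallel num_threads(4)
      {
          // Roughly lowered to:
          //   if (__kmpc_master(&loc, gtid)) { body; __kmpc_end_master(&loc, gtid); }
          #pragma omp master
          printf("master body runs once, on thread %d\n", omp_get_thread_num());
      }
      return 0;
  }
  @endcode
*/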
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number .
Mark the end of a <tt>master</tt> region. This should only be called by the thread
that executes the <tt>master</tt> region.
*/
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );
KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
KMP_STOP_EXPLICIT_TIMER(OMP_master);
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_master_end)) {
int tid = __kmp_tid_from_gtid( global_tid );
ompt_callbacks.ompt_callback(ompt_event_master_end)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
#endif
if ( __kmp_env_consistency_check ) {
if( global_tid < 0 )
KMP_WARNING( ThreadIdentInvalid );
if( KMP_MASTER_GTID( global_tid ))
__kmp_pop_sync( global_tid, ct_master, loc );
}
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
int cid = 0;
kmp_info_t *th;
KMP_DEBUG_ASSERT( __kmp_init_serial );
KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
#if USE_ITT_BUILD
__kmp_itt_ordered_prep( gtid );
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */
th = __kmp_threads[ gtid ];
#if OMPT_SUPPORT && OMPT_TRACE
if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.wait_id = (uint64_t) loc;
th->th.ompt_thread_info.state = ompt_state_wait_ordered;
/* OMPT event callback */
if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
th->th.ompt_thread_info.wait_id);
}
}
#endif
if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
(*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
else
__kmp_parallel_deo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_TRACE
if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.state = ompt_state_work_parallel;
th->th.ompt_thread_info.wait_id = 0;
/* OMPT event callback */
if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
th->th.ompt_thread_info.wait_id);
}
}
#endif
#if USE_ITT_BUILD
__kmp_itt_ordered_start( gtid );
#endif /* USE_ITT_BUILD */
}
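/*
  Illustrative user-level example (not part of the runtime): each ordered
  block below is bracketed by __kmpc_ordered()/__kmpc_end_ordered() in
  typical compiler output, forcing the prints into iteration order.
  @code
  #include <omp.h>
  #include <stdio.h>

  int main(void) {
      #pragma omp parallel for ordered schedule(static, 1)
      for (int i = 0; i < 8; i++) {
          #pragma omp ordered
          printf("iteration %d\n", i);   // prints 0..7 in order
      }
      return 0;
  }
  @endcode
*/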
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
int cid = 0;
kmp_info_t *th;
KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );
#if USE_ITT_BUILD
__kmp_itt_ordered_end( gtid );
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */
th = __kmp_threads[ gtid ];
if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
(*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
else
__kmp_parallel_dxo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
th->th.ompt_thread_info.wait_id);
}
#endif
}
#if KMP_USE_DYNAMIC_LOCK
static __forceinline void
__kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag)
{
// Pointer to the allocated indirect lock is written to crit, while indexing is ignored.
void *idx;
kmp_indirect_lock_t **lck;
lck = (kmp_indirect_lock_t **)crit;
kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
KMP_SET_I_LOCK_LOCATION(ilk, loc);
KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
__kmp_itt_critical_creating(ilk->lock, loc);
#endif
int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
if (status == 0) {
#if USE_ITT_BUILD
__kmp_itt_critical_destroyed(ilk->lock);
#endif
// We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit.
//KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
}
KMP_DEBUG_ASSERT(*lck != NULL);
}
// Fast-path acquire tas lock
#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) { \
kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
if (l->lk.poll != KMP_LOCK_FREE(tas) || \
! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
kmp_uint32 spins; \
KMP_FSYNC_PREPARE(l); \
KMP_INIT_YIELD(spins); \
if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
KMP_YIELD(TRUE); \
} else { \
KMP_YIELD_SPIN(spins); \
} \
kmp_backoff_t backoff = __kmp_spin_backoff_params; \
while (l->lk.poll != KMP_LOCK_FREE(tas) || \
! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
__kmp_spin_backoff(&backoff); \
if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
KMP_YIELD(TRUE); \
} else { \
KMP_YIELD_SPIN(spins); \
} \
} \
} \
KMP_FSYNC_ACQUIRED(l); \
}
// Fast-path test tas lock
#define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \
kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
rc = l->lk.poll == KMP_LOCK_FREE(tas) && \
KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \
}
// Fast-path release tas lock
#define KMP_RELEASE_TAS_LOCK(lock, gtid) { \
TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \
KMP_MB(); \
}
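/*
  Stand-alone sketch (C11, illustrative only) of the test-and-test-and-set
  idea behind KMP_ACQUIRE_TAS_LOCK above: spin on a plain read, attempt the
  atomic swap only when the word looks free, and release with a plain store.
  The real macros add yielding and backoff, which this sketch omits.
  @code
  #include <stdatomic.h>

  static atomic_int tas_word;   // 0 = free, 1 = held

  static void tas_lock(void) {
      for (;;) {
          while (atomic_load_explicit(&tas_word, memory_order_relaxed))
              ;   // read-only spin: cheap while the lock is held
          if (!atomic_exchange_explicit(&tas_word, 1, memory_order_acquire))
              return;   // swap saw 0: we now hold the lock
      }
  }

  static void tas_unlock(void) {
      atomic_store_explicit(&tas_word, 0, memory_order_release);
  }
  @endcode
*/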
#if KMP_USE_FUTEX
# include <unistd.h>
# include <sys/syscall.h>
# ifndef FUTEX_WAIT
# define FUTEX_WAIT 0
# endif
# ifndef FUTEX_WAKE
# define FUTEX_WAKE 1
# endif
// Fast-path acquire futex lock
#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
kmp_int32 gtid_code = (gtid+1) << 1; \
KMP_MB(); \
KMP_FSYNC_PREPARE(ftx); \
kmp_int32 poll_val; \
while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \
KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \
kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \
if (!cond) { \
if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \
continue; \
} \
poll_val |= KMP_LOCK_BUSY(1, futex); \
} \
kmp_int32 rc; \
if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \
continue; \
} \
gtid_code |= 1; \
} \
KMP_FSYNC_ACQUIRED(ftx); \
}
// Fast-path test futex lock
#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1)) { \
KMP_FSYNC_ACQUIRED(ftx); \
rc = TRUE; \
} else { \
rc = FALSE; \
} \
}
// Fast-path release futex lock
#define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
KMP_MB(); \
KMP_FSYNC_RELEASING(ftx); \
kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \
if (KMP_LOCK_STRIP(poll_val) & 1) { \
syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \
} \
KMP_MB(); \
KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \
}
#endif // KMP_USE_FUTEX
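/*
  Stand-alone sketch (Linux-only, C11, illustrative; after Drepper's
  "Futexes Are Tricky") of the protocol the futex macros above implement:
  the lock word distinguishes "held" from "held with waiters", so the
  releaser issues FUTEX_WAKE only when someone may be sleeping. This is
  not the runtime's actual code.
  @code
  #include <linux/futex.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdatomic.h>

  static atomic_int word;   // 0 = free, 1 = held, 2 = held + waiters

  static void sketch_lock(void) {
      int c = 0;
      if (atomic_compare_exchange_strong(&word, &c, 1))
          return;                           // uncontended fast path: 0 -> 1
      if (c != 2)
          c = atomic_exchange(&word, 2);    // announce a waiter
      while (c != 0) {                      // still held: sleep in the kernel
          syscall(SYS_futex, &word, FUTEX_WAIT, 2, NULL, NULL, 0);
          c = atomic_exchange(&word, 2);
      }
  }

  static void sketch_unlock(void) {
      if (atomic_exchange(&word, 0) == 2)   // waiters may be sleeping
          syscall(SYS_futex, &word, FUTEX_WAKE, 1, NULL, NULL, 0);
  }
  @endcode
*/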
#else // KMP_USE_DYNAMIC_LOCK
static kmp_user_lock_p
__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
{
kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;
//
// Because of the double-check, the following load
// doesn't need to be volatile.
//
kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
if ( lck == NULL ) {
void * idx;
// Allocate & initialize the lock.
// Remember allocated locks in table in order to free them in __kmp_cleanup()
lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
__kmp_init_user_lock_with_checks( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_critical_creating( lck );
// __kmp_itt_critical_creating() should be called *before* the first use of the underlying
// lock; this is the only place where that can be guaranteed. The lock may be destroyed
// without ever being used, but that is not a problem: this is not a real event seen by the
// user, just a way of setting a name for the object (the lock). See kmp_itt.h for details.
#endif /* USE_ITT_BUILD */
//
// Use a cmpxchg instruction to slam the start of the critical
// section with the lock pointer. If another thread beat us
// to it, deallocate the lock, and use the lock that the other
// thread allocated.
//
int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );
if ( status == 0 ) {
// Deallocate the lock and reload the value.
#if USE_ITT_BUILD
__kmp_itt_critical_destroyed( lck );
// Let ITT know the lock is destroyed and the same memory location may be reused for
// another purpose.
#endif /* USE_ITT_BUILD */
__kmp_destroy_user_lock_with_checks( lck );
__kmp_user_lock_free( &idx, gtid, lck );
lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
KMP_DEBUG_ASSERT( lck != NULL );
}
}
return lck;
}
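/*
  Generic sketch (C11, illustrative) of the publication pattern used above:
  racing threads each build a candidate object, exactly one wins the
  compare-and-swap into the shared slot, and the losers free their candidate
  and adopt the winner's. Names here are hypothetical.
  @code
  #include <stdatomic.h>
  #include <stdlib.h>

  typedef struct { int payload; } lock_obj;

  static lock_obj *get_or_publish(_Atomic(lock_obj *) *slot) {
      lock_obj *obj = atomic_load(slot);
      if (obj == NULL) {
          lock_obj *mine = calloc(1, sizeof *mine);   // our candidate
          lock_obj *expected = NULL;
          if (atomic_compare_exchange_strong(slot, &expected, mine)) {
              obj = mine;       // we won: our object is now published
          } else {
              free(mine);       // another thread beat us to it
              obj = expected;   // CAS failure loaded the winner's pointer
          }
      }
      return obj;
  }
  @endcode
*/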
#endif // KMP_USE_DYNAMIC_LOCK
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.
Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
#if KMP_USE_DYNAMIC_LOCK
__kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
KMP_COUNT_BLOCK(OMP_CRITICAL);
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
//TODO: add THR_OVHD_STATE
KMP_CHECK_USER_LOCK_INIT();
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#endif
else { // ticket, queuing or drdpa
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
}
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_critical, loc, lck );
/* since the critical directive binds to all threads, not just
* the current team we have to check this even if we are in a
* serialized team */
/* also, even if we are the uber thread, we still have to conduct the lock,
* as we have to contend with sibling threads */
#if USE_ITT_BUILD
__kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
// Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_acquire_user_lock_with_checks( lck, global_tid );
#if USE_ITT_BUILD
__kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */
KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
#endif // KMP_USE_DYNAMIC_LOCK
}
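/*
  Minimal user-level example (illustrative): a named critical region is
  lowered to a __kmpc_critical()/__kmpc_end_critical() pair sharing one
  kmp_critical_name slot, which is how the update below stays race-free.
  @code
  #include <omp.h>
  #include <stdio.h>

  int main(void) {
      int sum = 0;
      #pragma omp parallel num_threads(4)
      {
          #pragma omp critical(update)   // one thread at a time in here
          sum += 1;
      }
      printf("sum = %d\n", sum);   // always 4
      return 0;
  }
  @endcode
*/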
#if KMP_USE_DYNAMIC_LOCK
// Converts the given hint to an internal lock implementation
static __forceinline kmp_dyna_lockseq_t
__kmp_map_hint_to_lock(uintptr_t hint)
{
#if KMP_USE_TSX
# define KMP_TSX_LOCK(seq) lockseq_##seq
#else
# define KMP_TSX_LOCK(seq) __kmp_user_lock_seq
#endif
// Hints that do not require further logic
if (hint & kmp_lock_hint_hle)
return KMP_TSX_LOCK(hle);
if (hint & kmp_lock_hint_rtm)
return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq;
if (hint & kmp_lock_hint_adaptive)
return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq;
// Rule out conflicting hints first by returning the default lock
if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended))
return __kmp_user_lock_seq;
if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative))
return __kmp_user_lock_seq;
// Do not even consider speculation when it appears to be contended
if (hint & omp_lock_hint_contended)
return lockseq_queuing;
// Uncontended lock without speculation
if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative))
return lockseq_tas;
// HLE lock for speculation
if (hint & omp_lock_hint_speculative)
return KMP_TSX_LOCK(hle);
return __kmp_user_lock_seq;
}
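/*
  User-level view (illustrative) of the mapping above: the hint passed to
  the OpenMP 4.5 lock API reaches __kmp_map_hint_to_lock(), which may pick
  a TAS, queuing, or TSX-based lock sequence.
  @code
  #include <omp.h>

  int main(void) {
      omp_lock_t lk;
      omp_init_lock_with_hint(&lk, omp_lock_hint_contended);
      omp_set_lock(&lk);     // likely a queuing lock, per the mapping above
      omp_unset_lock(&lk);
      omp_destroy_lock(&lk);
      return 0;
  }
  @endcode
*/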
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section,
or some other suitably unique value.
@param hint the lock hint.
Enter code protected by a `critical` construct with a hint. The hint value is used to suggest a lock implementation.
This function blocks until the executing thread can enter the critical section unless the hint suggests use of
speculative execution and the hardware supports it.
*/
void
__kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint )
{
KMP_COUNT_BLOCK(OMP_CRITICAL);
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
if (*lk == 0) {
kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint);
if (KMP_IS_D_LOCK(lckseq)) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
}
}
// Branch to access the actual lock object and perform the set operation. This branching is
// inevitable since this lock initialization does not follow the normal dispatch path (the lock table is not used).
if (KMP_EXTRACT_D_TAG(lk) != 0) {
lck = (kmp_user_lock_p)lk;
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
}
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
# if KMP_USE_INLINED_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
} else
# elif KMP_USE_INLINED_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
lck = ilk->lock;
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
}
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
}
#if USE_ITT_BUILD
__kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */
KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical_with_hint
#endif // KMP_USE_DYNAMIC_LOCK
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.
Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));
#if KMP_USE_DYNAMIC_LOCK
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
KMP_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_pop_sync(global_tid, ct_critical, loc);
}
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
# if KMP_USE_INLINED_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
KMP_RELEASE_TAS_LOCK(lck, global_tid);
} else
# elif KMP_USE_INLINED_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
KMP_ASSERT(ilk != NULL);
lck = ilk->lock;
if (__kmp_env_consistency_check) {
__kmp_pop_sync(global_tid, ct_critical, loc);
}
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#endif
else { // ticket, queuing or drdpa
lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
}
KMP_ASSERT(lck != NULL);
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_critical, loc );
#if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
// Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_release_user_lock_with_checks( lck, global_tid );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
ompt_callbacks.ompt_callback(ompt_event_release_critical)(
(uint64_t) lck);
}
#endif
#endif // KMP_USE_DYNAMIC_LOCK
KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise
Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
int status;
KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check )
__kmp_check_barrier( global_tid, ct_barrier, loc );
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );
return (status != 0) ? 0 : 1;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));
__kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise
Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function; the bookkeeping that __kmpc_end_master would otherwise perform is done inside this function.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
kmp_int32 ret;
KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
}
__kmp_check_barrier( global_tid, ct_barrier, loc );
}
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
ret = __kmpc_master (loc, global_tid);
if ( __kmp_env_consistency_check ) {
/* there is no __kmpc_end_master call, so the (stats) */
/* actions of __kmpc_end_master are performed here */
if ( global_tid < 0 ) {
KMP_WARNING( ThreadIdentInvalid );
}
if (ret) {
/* only one thread should do the pop since only */
/* one did the push (see __kmpc_master()) */
__kmp_pop_sync( global_tid, ct_master, loc );
}
}
return (ret);
}
/* The BARRIER for a SINGLE process section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
@return One if this thread should execute the single construct, zero otherwise.
Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls; rather, the compiler should
introduce an explicit barrier if it is required.
*/
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_SINGLE);
kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );
if(rc == TRUE) {
KMP_START_EXPLICIT_TIMER(OMP_single);
}
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
if (ompt_enabled) {
if (rc) {
if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
team->t.ompt_team_info.microtask);
}
} else {
if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
}
}
#endif
return rc;
}
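/*
  Illustrative lowering sketch (hedged; exact code generation is
  compiler-dependent): a single construct typically tests the return value
  documented above and adds a separate barrier unless nowait is given.
  @code
  #include <omp.h>
  #include <stdio.h>

  int main(void) {
      #pragma omp parallel num_threads(4)
      {
          // Roughly lowered to:
          //   if (__kmpc_single(&loc, gtid)) { body; __kmpc_end_single(&loc, gtid); }
          //   __kmpc_barrier(&loc, gtid);   // omitted for single ... nowait
          #pragma omp single
          printf("only thread %d runs this\n", omp_get_thread_num());
      }
      return 0;
  }
  @endcode
*/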
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
Mark the end of a <tt>single</tt> construct. This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
__kmp_exit_single( global_tid );
KMP_STOP_EXPLICIT_TIMER(OMP_single);
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
#endif
}
/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id
Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));
#if OMPT_SUPPORT && OMPT_TRACE
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
ompt_callbacks.ompt_callback(ompt_event_loop_end)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
#endif
if ( __kmp_env_consistency_check )
__kmp_pop_workshare( global_tid, ct_pdo, loc );
}
/*
* User routines which take C-style arguments (call by value)
* different from the Fortran equivalent routines
*/
void
ompc_set_num_threads( int arg )
{
// !!!!! TODO: check the per-task binding
__kmp_set_num_threads( arg, __kmp_entry_gtid() );
}
void
ompc_set_dynamic( int flag )
{
kmp_info_t *thread;
/* For the thread-private implementation of the internal controls */
thread = __kmp_entry_thread();
__kmp_save_internal_controls( thread );
set__dynamic( thread, flag ? TRUE : FALSE );
}
void
ompc_set_nested( int flag )
{
kmp_info_t *thread;
/* For the thread-private internal controls implementation */
thread = __kmp_entry_thread();
__kmp_save_internal_controls( thread );
set__nested( thread, flag ? TRUE : FALSE );
}
void
ompc_set_max_active_levels( int max_active_levels )
{
/* TO DO */
/* we want per-task implementation of this internal control */
/* For the per-thread internal controls implementation */
__kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}
void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
// !!!!! TODO: check the per-task binding
__kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}
int
ompc_get_ancestor_thread_num( int level )
{
return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}
int
ompc_get_team_size( int level )
{
return __kmp_get_team_size( __kmp_entry_gtid(), level );
}
void
kmpc_set_stacksize( int arg )
{
// __kmp_aux_set_stacksize initializes the library if needed
__kmp_aux_set_stacksize( arg );
}
void
kmpc_set_stacksize_s( size_t arg )
{
// __kmp_aux_set_stacksize initializes the library if needed
__kmp_aux_set_stacksize( arg );
}
void
kmpc_set_blocktime( int arg )
{
int gtid, tid;
kmp_info_t *thread;
gtid = __kmp_entry_gtid();
tid = __kmp_tid_from_gtid(gtid);
thread = __kmp_thread_from_gtid(gtid);
__kmp_aux_set_blocktime( arg, thread, tid );
}
void
kmpc_set_library( int arg )
{
// __kmp_user_set_library initializes the library if needed
__kmp_user_set_library( (enum library_type)arg );
}
void
kmpc_set_defaults( char const * str )
{
// __kmp_aux_set_defaults initializes the library if needed
__kmp_aux_set_defaults( str, KMP_STRLEN( str ) );
}
void
kmpc_set_disp_num_buffers( int arg )
{
// ignore after initialization because some teams have already
// allocated dispatch buffers
if( __kmp_init_serial == 0 && arg > 0 )
__kmp_dispatch_num_buffers = arg;
}
int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
return -1;
#else
if ( ! TCR_4(__kmp_init_middle) ) {
__kmp_middle_initialize();
}
return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}
int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
return -1;
#else
if ( ! TCR_4(__kmp_init_middle) ) {
__kmp_middle_initialize();
}
return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}
int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
return -1;
#else
if ( ! TCR_4(__kmp_init_middle) ) {
__kmp_middle_initialize();
}
return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}
/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc source location information
@param gtid global thread number
@param cpy_size size of the cpy_data buffer
@param cpy_data pointer to data to be copied
@param cpy_func helper function to call for copying data
@param didit flag variable: 1=single thread; 0=not single thread
__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.
The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.
The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.
Internal implementation: The single thread will first copy its descriptor address (cpy_data)
to a team-private location, then the other threads will each call the function pointed to by
the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer.
The cpy_func routine used for the copy and the contents of the data area defined by cpy_data
and cpy_size may be built in any fashion that will allow the copy to be done. For instance,
the cpy_data buffer can hold the actual data to be copied or it may hold a list of pointers
to the data. The cpy_func routine must interpret the cpy_data buffer appropriately.
The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
void **data_ptr;
KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));
KMP_MB();
data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid );
}
}
/* ToDo: Optimize the following two barriers into some kind of split barrier */
if (didit) *data_ptr = cpy_data;
/* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
__kmp_threads[gtid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
if (! didit) (*cpy_func)( cpy_data, *data_ptr );
/* Consider next barrier the user-visible barrier for barrier region boundaries */
/* Nesting checks are already handled by the single construct checks */
#if USE_ITT_NOTIFY
__kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. tasks can overwrite the location)
#endif
__kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}
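/*
  Minimal user-level example (illustrative) of the broadcast described in
  the comment above: the compiler builds a cpy_data record and a helper
  matching void cpy_func(void *destination, void *source), then calls
  __kmpc_copyprivate() so every thread's private copy receives the single
  thread's value.
  @code
  #include <omp.h>
  #include <stdio.h>

  int main(void) {
      #pragma omp parallel num_threads(4)
      {
          int value;   // private to each thread
          #pragma omp single copyprivate(value)
          value = 42;  // broadcast to all threads via __kmpc_copyprivate()
          printf("thread %d sees %d\n", omp_get_thread_num(), value);
      }
      return 0;
  }
  @endcode
*/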
/* -------------------------------------------------------------------------- */
#define INIT_LOCK __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK __kmp_destroy_nested_user_lock_with_checks
/*
* TODO: Make check abort messages use location info & pass it
* into with_checks routines
*/
#if KMP_USE_DYNAMIC_LOCK
// internal lock initializer
static __forceinline void
__kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
if (KMP_IS_D_LOCK(seq)) {
KMP_INIT_D_LOCK(lock, seq);
#if USE_ITT_BUILD
__kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
#endif
} else {
KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
__kmp_itt_lock_creating(ilk->lock, loc);
#endif
}
}
// internal nest lock initializer
static __forceinline void
__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
#if KMP_USE_TSX
// Don't have nested lock implementation for speculative locks
if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive)
seq = __kmp_user_lock_seq;
#endif
switch (seq) {
case lockseq_tas:
seq = lockseq_nested_tas;
break;
#if KMP_USE_FUTEX
case lockseq_futex:
seq = lockseq_nested_futex;
break;
#endif
case lockseq_ticket:
seq = lockseq_nested_ticket;
break;
case lockseq_queuing:
seq = lockseq_nested_queuing;
break;
case lockseq_drdpa:
seq = lockseq_nested_drdpa;
break;
default:
seq = lockseq_nested_queuing;
}
KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
__kmp_itt_lock_creating(ilk->lock, loc);
#endif
}
/* initialize the lock with a hint */
void
__kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint");
}
__kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}
/* initialize the lock with a hint */
void
__kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint");
}
__kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}
#endif // KMP_USE_DYNAMIC_LOCK
/* initialize the lock */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_lock");
}
__kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);
#else // KMP_USE_DYNAMIC_LOCK
static char const * const func = "omp_init_lock";
kmp_user_lock_p lck;
KMP_DEBUG_ASSERT( __kmp_init_serial );
if ( __kmp_env_consistency_check ) {
if ( user_lock == NULL ) {
KMP_FATAL( LockIsUninitialized, func );
}
}
KMP_CHECK_USER_LOCK_INIT();
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
}
INIT_LOCK( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock
/* initialize the lock */
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
}
__kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);
#else // KMP_USE_DYNAMIC_LOCK
static char const * const func = "omp_init_nest_lock";
kmp_user_lock_p lck;
KMP_DEBUG_ASSERT( __kmp_init_serial );
if ( __kmp_env_consistency_check ) {
if ( user_lock == NULL ) {
KMP_FATAL( LockIsUninitialized, func );
}
}
KMP_CHECK_USER_LOCK_INIT();
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
}
INIT_NESTED_LOCK( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock
void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
kmp_user_lock_p lck;
if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
} else {
lck = (kmp_user_lock_p)user_lock;
}
__kmp_itt_lock_destroyed(lck);
# endif
KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
DESTROY_LOCK( lck );
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
;
}
#endif
else {
__kmp_user_lock_free( user_lock, gtid, lck );
}
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock
/* destroy the lock */
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
__kmp_itt_lock_destroyed(ilk->lock);
# endif
KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
DESTROY_NESTED_LOCK( lck );
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
;
}
#endif
else {
__kmp_user_lock_free( user_lock, gtid, lck );
}
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
} else
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
} else
# endif
{
__kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid);
}
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
ACQUIRE_LOCK( lck, gtid );
#if USE_ITT_BUILD
__kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
ACQUIRE_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
__kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_RELEASE_TAS_LOCK(user_lock, gtid);
} else
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
} else
# endif
{
__kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid);
}
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
/* Can't use serial interval since not block structured */
/* release the lock */
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
KMP_MB();
return;
#else
lck = (kmp_user_lock_p)user_lock;
#endif
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */
RELEASE_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
}
#endif
#endif // KMP_USE_DYNAMIC_LOCK
}
/* release the lock */
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
/* Can't use serial interval since not block structured */
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// "fast" path implemented to fix customer performance issue
kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
if ( --(tl->lk.depth_locked) == 0 ) {
TCW_4(tl->lk.poll, 0);
}
KMP_MB();
return;
#else
lck = (kmp_user_lock_p)user_lock;
#endif
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */
int release_status;
release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled) {
if (release_status == KMP_LOCK_RELEASED) {
if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(
(uint64_t) lck);
}
} else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) {
ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)(
(uint64_t) lck);
}
}
#endif
#endif // KMP_USE_DYNAMIC_LOCK
}
/* try to acquire the lock */
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
KMP_COUNT_BLOCK(OMP_test_lock);
#if KMP_USE_DYNAMIC_LOCK
int rc;
int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
} else
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
} else
# endif
{
rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid);
}
if (rc) {
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
return FTN_TRUE;
} else {
# if USE_ITT_BUILD
__kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
# endif
return FTN_FALSE;
}
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
int rc;
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
if ( rc ) {
__kmp_itt_lock_acquired( lck );
} else {
__kmp_itt_lock_cancelled( lck );
}
#endif /* USE_ITT_BUILD */
return ( rc ? FTN_TRUE : FTN_FALSE );
/* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}
/* try to acquire the lock */
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
int rc;
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
if (rc) {
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
} else {
__kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
}
# endif
return rc;
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
int rc;
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
if ( rc ) {
__kmp_itt_lock_acquired( lck );
} else {
__kmp_itt_lock_cancelled( lck );
}
#endif /* USE_ITT_BUILD */
return rc;
/* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}
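/*
  User-level view (illustrative): omp_test_lock() maps onto the
  __kmpc_test_lock() entry point above, returning nonzero on acquisition
  instead of blocking.
  @code
  #include <omp.h>
  #include <stdio.h>

  int main(void) {
      omp_lock_t lk;
      omp_init_lock(&lk);
      if (omp_test_lock(&lk)) {        // acquired without blocking
          puts("got the lock");
          omp_unset_lock(&lk);
      }
      omp_destroy_lock(&lk);
      return 0;
  }
  @endcode
*/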
/*--------------------------------------------------------------------------------------------------------------------*/
/*
* Interface to fast scalable reduce methods routines
*/
// Keep the selected method in a thread-local structure for cross-function use: it is read back in the __kmpc_end_reduce* functions.
// An alternative would be to re-determine the method once more in the __kmpc_end_reduce* functions (a new prototype would then be required).
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )
#define __KMP_GET_REDUCTION_METHOD(gtid) \
( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )
// description of the packed_reduction_method variable: look at the macros in kmp.h
// used in a critical section reduce block
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
// This lock was visible to customers and to the threading profile tool as a serial overhead span
// (although it is used for an internal purpose only).
// Why was it visible in the previous implementation?
// Should we keep it visible in the new reduce block?
kmp_user_lock_p lck;
#if KMP_USE_DYNAMIC_LOCK
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
if (*lk == 0) {
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
}
}
// Branch to access the actual lock object and perform the set operation. This branching is
// inevitable since this lock initialization does not follow the normal dispatch path (the lock table is not used).
if (KMP_EXTRACT_D_TAG(lk) != 0) {
lck = (kmp_user_lock_p)lk;
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
} else {
kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
lck = ilk->lock;
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
// We know that the fast reduction code is only emitted by Intel compilers
// with 32-byte critical sections. If there isn't enough space, then we
// have to use a pointer.
if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
lck = (kmp_user_lock_p)crit;
}
else {
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
}
KMP_DEBUG_ASSERT( lck != NULL );
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_critical, loc, lck );
__kmp_acquire_user_lock_with_checks( lck, global_tid );
#endif // KMP_USE_DYNAMIC_LOCK
}
// used in a critical section reduce block
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
kmp_user_lock_p lck;
#if KMP_USE_DYNAMIC_LOCK
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
if (__kmp_env_consistency_check)
__kmp_pop_sync(global_tid, ct_critical, loc);
KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
} else {
kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
if (__kmp_env_consistency_check)
__kmp_pop_sync(global_tid, ct_critical, loc);
KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
// We know that the fast reduction code is only emitted by Intel compilers with 32-byte critical
// sections. If there isn't enough space, then we have to use a pointer.
if ( __kmp_base_user_lock_size > 32 ) {
lck = *( (kmp_user_lock_p *) crit );
KMP_ASSERT( lck != NULL );
} else {
lck = (kmp_user_lock_p) crit;
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_critical, loc );
__kmp_release_user_lock_with_checks( lck, global_tid );
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block
/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed
The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck ) {
KMP_COUNT_BLOCK(REDUCE_nowait);
int retval = 0;
PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
kmp_team_t *team;
kmp_info_t *th;
int teams_swapped = 0, task_state;
#endif
KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );
// Why do we need this initialization here at all?
// A reduction clause cannot be used as a stand-alone directive.
// Do not call __kmp_serial_initialize(); it will be called by __kmp_parallel_initialize() if needed.
// Possible detection of a false-positive race by the thread checker ???
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
// check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif
#if OMP_40_ENABLED
th = __kmp_thread_from_gtid(global_tid);
if( th->th.th_teams_microtask ) { // AC: check if we are inside the teams construct?
team = th->th.th_team;
if( team->t.t_level == th->th.th_teams_level ) {
// this is reduction at teams construct
KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid); // AC: check that tid == 0
// Let's swap teams temporarily for the reduction barrier
teams_swapped = 1;
th->th.th_info.ds.ds_tid = team->t.t_master_tid;
th->th.th_team = team->t.t_parent;
th->th.th_team_nproc = th->th.th_team->t.t_nproc;
th->th.th_task_team = th->th.th_team->t.t_task_team[0];
task_state = th->th.th_task_state;
th->th.th_task_state = 0;
}
}
#endif // OMP_40_ENABLED
// The packed_reduction_method value will be reused by the __kmp_end_reduce* functions, so it must be kept in a variable.
// The variable should be either a construct-specific or a thread-specific property, not a team-specific one
// (a thread can reach the next reduce block on the next construct, and the reduce method may differ there).
// An ident_t "loc" parameter could be used as a construct-specific property (but what if loc == 0?);
// (if construct-specific or team-specific variables were shared, unnecessary extra syncs would be needed).
// A thread-specific variable is better with respect to the two issues above (next construct and extra syncs);
// the thread-specific "th_local.reduction_method" variable is what is used currently.
// Each thread executes the 'determine' and 'set' lines (restricting this to one thread would only add unnecessary extra syncs).
packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
retval = 1;
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
retval = 1;
} else if( packed_reduction_method == atomic_reduce_block ) {
retval = 2;
// all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
// (this is not quite right: the checking block has been closed by this 'pop', but the atomic
// operation has not executed yet; it runs slightly later, literally on the next instruction)
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
//AT: performance issue: a real barrier here
//AT: (if the master is slow, other threads are blocked here waiting for the master to come and release them)
//AT: (this is not what a customer might expect when specifying the NOWAIT clause)
//AT: (specifying NOWAIT won't improve performance; it will be confusing to a customer)
//AT: another implementation of *barrier_gather*nowait() (or some other design) might be faster
//    and more in line with the sense of NOWAIT
//AT: TODO: run the EPCC test and compare times
// this barrier should be invisible to a customer and to the threading profile tool
// (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
retval = ( retval != 0 ) ? ( 0 ) : ( 1 );
// all workers except the master should do this pop here
// ( none of the other workers will reach __kmpc_end_reduce_nowait() )
if ( __kmp_env_consistency_check ) {
if( retval == 0 ) {
__kmp_pop_sync( global_tid, ct_reduce, loc );
}
}
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
#if OMP_40_ENABLED
if( teams_swapped ) {
// Restore thread structure
th->th.th_info.ds.ds_tid = 0;
th->th.th_team = team;
th->th.th_team_nproc = team->t.t_nproc;
th->th.th_task_team = team->t.t_task_team[task_state];
th->th.th_task_state = task_state;
}
#endif
KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
return retval;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure
Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );
packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_end_critical_section_reduce_block( loc, global_tid, lck );
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( on Intel platforms only )
} else if( packed_reduction_method == atomic_reduce_block ) {
// neither master nor other workers should get here
// (code gen does not generate this call in case 2: atomic reduce block)
// actually it would be better to remove this else-if entirely;
// after removal this value would be caught by the 'else' branch and trigger the assert
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
// only master gets here
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );
return;
}
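// Illustrative sketch (an assumption about typical code generation, not actual
// compiler output) of how the two entry points above might be driven for
// "#pragma omp parallel for reduction(+:sum) nowait"; my_loc, gtid, crit_lock,
// sum_priv/sum_shared and the reducer below are hypothetical names:
#if 0
static void my_add_reducer( void *lhs_data, void *rhs_data ) {
    // combine two private partial sums into lhs_data
    *(kmp_int32 *)lhs_data += *(kmp_int32 *)rhs_data;
}
// ... inside the outlined parallel region, after computing sum_priv:
switch( __kmpc_reduce_nowait( my_loc, gtid, 1, sizeof( kmp_int32 ), &sum_priv,
                              my_add_reducer, &crit_lock ) ) {
    case 1: // critical/empty path, or the master after a tree reduce
        sum_shared += sum_priv;
        __kmpc_end_reduce_nowait( my_loc, gtid, &crit_lock );
        break;
    case 2: // atomic path: every thread accumulates atomically
        // (e.g. via an __kmpc_atomic_* entry point)
        break;
    default: // 0: non-master workers after a tree reduce; nothing to do
        break;
}
#endif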
/* 2.a.ii. Reduce Block with a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed
A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck )
{
KMP_COUNT_BLOCK(REDUCE_wait);
int retval = 0;
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );
// why do we need this initialization here at all?
// The reduction clause cannot be a stand-alone directive.
// do not call __kmp_serial_initialize(); it will be called by __kmp_parallel_initialize() if needed
// note: the thread checker may report a false-positive race here
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
// check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif
packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
retval = 1;
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
retval = 1;
} else if( packed_reduction_method == atomic_reduce_block ) {
retval = 2;
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
//case tree_reduce_block:
// this barrier should be visible to the customer and to the threading profile tool
// (it is a terminating barrier on constructs when NOWAIT is not specified)
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
retval = ( retval != 0 ) ? ( 0 ) : ( 1 );
// all workers except the master should do this pop here
// ( no worker except the master will enter __kmpc_end_reduce() )
if ( __kmp_env_consistency_check ) {
if( retval == 0 ) { // 0: all other workers; 1: master
__kmp_pop_sync( global_tid, ct_reduce, loc );
}
}
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
return retval;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure
Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );
packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );
// this barrier should be visible to the customer and to the threading profile tool
// (it is a terminating barrier on constructs when NOWAIT is not specified)
if( packed_reduction_method == critical_reduce_block ) {
__kmp_end_critical_section_reduce_block( loc, global_tid, lck );
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( packed_reduction_method == atomic_reduce_block ) {
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
// only master executes here (master releases all other workers)
__kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );
return;
}
#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD
/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/
kmp_uint64
__kmpc_get_taskid() {
kmp_int32 gtid;
kmp_info_t * thread;
gtid = __kmp_get_gtid();
if ( gtid < 0 ) {
return 0;
} // if
thread = __kmp_thread_from_gtid( gtid );
return thread->th.th_current_task->td_task_id;
} // __kmpc_get_taskid
kmp_uint64
__kmpc_get_parent_taskid() {
kmp_int32 gtid;
kmp_info_t * thread;
kmp_taskdata_t * parent_task;
gtid = __kmp_get_gtid();
if ( gtid < 0 ) {
return 0;
} // if
thread = __kmp_thread_from_gtid( gtid );
parent_task = thread->th.th_current_task->td_parent;
return ( parent_task == NULL ? 0 : parent_task->td_task_id );
} // __kmpc_get_parent_taskid
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
if ( ! __kmp_init_serial ) {
__kmp_serial_initialize();
}
__kmp_place_num_sockets = nS;
__kmp_place_socket_offset = sO;
__kmp_place_num_cores = nC;
__kmp_place_core_offset = cO;
__kmp_place_num_threads_per_core = nT;
}
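// Hypothetical usage sketch: __kmpc_place_threads( 2, 0, 4, 0, 2 ) would request
// 2 sockets (offset 0) x 4 cores per socket (offset 0) x 2 threads per core,
// i.e. 16 hardware-thread places, mirroring a KMP_PLACE_THREADS-style setting.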
#if OMP_45_ENABLED
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
@param num_dims number of associated doacross loops.
@param dims info on loops bounds.
Initialize doacross loop information.
Expect the compiler to send us inclusive bounds,
e.g. for(i=2;i<9;i+=2) gives lo=2, up=8, st=2.
*/
void
__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims)
{
int j, idx;
kmp_int64 last, trace_count;
kmp_info_t *th = __kmp_threads[gtid];
kmp_team_t *team = th->th.th_team;
kmp_uint32 *flags;
kmp_disp_t *pr_buf = th->th.th_dispatch;
dispatch_shared_info_t *sh_buf;
KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
gtid, num_dims, !team->t.t_serialized));
KMP_DEBUG_ASSERT(dims != NULL);
KMP_DEBUG_ASSERT(num_dims > 0);
if( team->t.t_serialized ) {
KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n"));
return; // no dependencies if team is serialized
}
KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for the next loop
sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
// Save bounds info into allocated private buffer
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
pr_buf->th_doacross_info =
(kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1));
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions
// Also save the address of num_done so it can be accessed later without knowing the buffer index
pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
pr_buf->th_doacross_info[2] = dims[0].lo;
pr_buf->th_doacross_info[3] = dims[0].up;
pr_buf->th_doacross_info[4] = dims[0].st;
last = 5;
for( j = 1; j < num_dims; ++j ) {
kmp_int64 range_length; // to keep the ranges of all dimensions except the first, dims[0]
if( dims[j].st == 1 ) { // most common case
// AC: should we care of ranges bigger than LLONG_MAX? (not for now)
range_length = dims[j].up - dims[j].lo + 1;
} else {
if( dims[j].st > 0 ) {
KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
} else { // negative increment
KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
}
}
pr_buf->th_doacross_info[last++] = range_length;
pr_buf->th_doacross_info[last++] = dims[j].lo;
pr_buf->th_doacross_info[last++] = dims[j].up;
pr_buf->th_doacross_info[last++] = dims[j].st;
}
// Compute total trip count.
// Start with range of dims[0] which we don't need to keep in the buffer.
if( dims[0].st == 1 ) { // most common case
trace_count = dims[0].up - dims[0].lo + 1;
} else if( dims[0].st > 0 ) {
KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
} else { // negative increment
KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
}
for( j = 1; j < num_dims; ++j ) {
trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
}
KMP_DEBUG_ASSERT(trace_count > 0);
// Check that the shared buffer is not still occupied by a previous loop (index idx - __kmp_dispatch_num_buffers)
if( idx != sh_buf->doacross_buf_idx ) {
// Shared buffer is occupied, wait for it to be free
__kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL );
}
// Check if we are the first thread. The CAS returns NULL to the first thread;
// later threads see 1 while initialization is in progress, or the allocated pointer afterwards.
flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64(
(kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1);
if( flags == NULL ) {
// we are the first thread, allocate the array of flags
kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration
sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1);
} else if( (kmp_int64)flags == 1 ) {
// initialization is still in progress, need to wait
while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) {
KMP_YIELD(TRUE);
}
}
KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer
pr_buf->th_doacross_flags = sh_buf->doacross_flags; // save a private copy so the shared
// buffer is not touched on each iteration
KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid));
}
void
__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec)
{
kmp_int32 shft, num_dims, i;
kmp_uint32 flag;
kmp_int64 iter_number; // iteration number of "collapsed" loop nest
kmp_info_t *th = __kmp_threads[gtid];
kmp_team_t *team = th->th.th_team;
kmp_disp_t *pr_buf;
kmp_int64 lo, up, st;
KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
if( team->t.t_serialized ) {
KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n"));
return; // no dependencies if team is serialized
}
// calculate sequential iteration number and check out-of-bounds condition
pr_buf = th->th.th_dispatch;
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
num_dims = pr_buf->th_doacross_info[0];
lo = pr_buf->th_doacross_info[2];
up = pr_buf->th_doacross_info[3];
st = pr_buf->th_doacross_info[4];
if( st == 1 ) { // most common case
if( vec[0] < lo || vec[0] > up ) {
KA_TRACE(20,(
"__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
gtid, vec[0], lo, up));
return;
}
iter_number = vec[0] - lo;
} else if( st > 0 ) {
if( vec[0] < lo || vec[0] > up ) {
KA_TRACE(20,(
"__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
gtid, vec[0], lo, up));
return;
}
iter_number = (kmp_uint64)(vec[0] - lo) / st;
} else { // negative increment
if( vec[0] > lo || vec[0] < up ) {
KA_TRACE(20,(
"__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
gtid, vec[0], lo, up));
return;
}
iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
}
for( i = 1; i < num_dims; ++i ) {
kmp_int64 iter, ln;
kmp_int32 j = i * 4;
ln = pr_buf->th_doacross_info[j + 1];
lo = pr_buf->th_doacross_info[j + 2];
up = pr_buf->th_doacross_info[j + 3];
st = pr_buf->th_doacross_info[j + 4];
if( st == 1 ) {
if( vec[i] < lo || vec[i] > up ) {
KA_TRACE(20,(
"__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
gtid, vec[i], lo, up));
return;
}
iter = vec[i] - lo;
} else if( st > 0 ) {
if( vec[i] < lo || vec[i] > up ) {
KA_TRACE(20,(
"__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
gtid, vec[i], lo, up));
return;
}
iter = (kmp_uint64)(vec[i] - lo) / st;
} else { // st < 0
if( vec[i] > lo || vec[i] < up ) {
KA_TRACE(20,(
"__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
gtid, vec[i], lo, up));
return;
}
iter = (kmp_uint64)(lo - vec[i]) / (-st);
}
iter_number = iter + ln * iter_number;
}
shft = iter_number % 32; // use 32-bit granularity
iter_number >>= 5; // divided by 32
flag = 1 << shft;
while( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) {
KMP_YIELD(TRUE);
}
KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
gtid, (iter_number<<5)+shft));
}
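// Worked example (hypothetical 2-D nest, both strides 1): with a dim0 range of 10
// and a dim1 range of 8, vec = {3, 5} linearizes to iter_number = 5 + 8*3 = 29,
// so its completion bit is bit 29%32 == 29 of word 29>>5 == 0 in th_doacross_flags.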
void
__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec)
{
kmp_int32 shft, num_dims, i;
kmp_uint32 flag;
kmp_int64 iter_number; // iteration number of "collapsed" loop nest
kmp_info_t *th = __kmp_threads[gtid];
kmp_team_t *team = th->th.th_team;
kmp_disp_t *pr_buf;
kmp_int64 lo, st;
KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid));
if( team->t.t_serialized ) {
KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n"));
return; // no dependencies if team is serialized
}
// calculate sequential iteration number (same as in "wait" but no out-of-bounds checks)
pr_buf = th->th.th_dispatch;
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
num_dims = pr_buf->th_doacross_info[0];
lo = pr_buf->th_doacross_info[2];
st = pr_buf->th_doacross_info[4];
if( st == 1 ) { // most common case
iter_number = vec[0] - lo;
} else if( st > 0 ) {
iter_number = (kmp_uint64)(vec[0] - lo) / st;
} else { // negative increment
iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
}
for( i = 1; i < num_dims; ++i ) {
kmp_int64 iter, ln;
kmp_int32 j = i * 4;
ln = pr_buf->th_doacross_info[j + 1];
lo = pr_buf->th_doacross_info[j + 2];
st = pr_buf->th_doacross_info[j + 4];
if( st == 1 ) {
iter = vec[i] - lo;
} else if( st > 0 ) {
iter = (kmp_uint64)(vec[i] - lo) / st;
} else { // st < 0
iter = (kmp_uint64)(lo - vec[i]) / (-st);
}
iter_number = iter + ln * iter_number;
}
shft = iter_number % 32; // use 32-bit granularity
iter_number >>= 5; // divided by 32
flag = 1 << shft;
if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 )
KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag );
KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n",
gtid, (iter_number<<5)+shft));
}
void
__kmpc_doacross_fini(ident_t *loc, int gtid)
{
kmp_int64 num_done;
kmp_info_t *th = __kmp_threads[gtid];
kmp_team_t *team = th->th.th_team;
kmp_disp_t *pr_buf = th->th.th_dispatch;
KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid));
if( team->t.t_serialized ) {
KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team));
return; // nothing to do
}
num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1;
if( num_done == th->th.th_team_nproc ) {
// we are the last thread, need to free shared resources
int idx = pr_buf->th_doacross_buf_idx - 1;
dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == (kmp_int64)&sh_buf->doacross_num_done);
KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done);
KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
__kmp_thread_free(th, (void*)sh_buf->doacross_flags);
sh_buf->doacross_flags = NULL;
sh_buf->doacross_num_done = 0;
sh_buf->doacross_buf_idx += __kmp_dispatch_num_buffers; // free buffer for future re-use
}
// free private resources (need to keep buffer index forever)
__kmp_thread_free(th, (void*)pr_buf->th_doacross_info);
pr_buf->th_doacross_info = NULL;
KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid));
}
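// Illustrative sketch (an assumption about typical code generation, not actual
// compiler output) of how a 1-D doacross loop such as "for( i = 2; i < 9; i += 2 )"
// with ordered(1) depend clauses maps onto the entry points above; my_loc, gtid,
// lb and ub (this thread's chunk bounds) are hypothetical names:
#if 0
struct kmp_dim dim = { 2 /* lo */, 8 /* up */, 2 /* st */ };
__kmpc_doacross_init( my_loc, gtid, 1, &dim );
for( long long i = lb; i <= ub; i += 2 ) {
    long long sink = i - 2, src = i;
    __kmpc_doacross_wait( my_loc, gtid, &sink ); // ordered depend(sink: i-2)
    /* ... loop body ... */
    __kmpc_doacross_post( my_loc, gtid, &src );  // ordered depend(source)
}
__kmpc_doacross_fini( my_loc, gtid );
#endif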
#endif
// end of file //
|
GB_binop__ne_uint32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_uint32
// A.*B function (eWiseMult): GB_AemultB__ne_uint32
// A*D function (colscale): GB_AxD__ne_uint32
// D*A function (rowscale): GB_DxB__ne_uint32
// C+=B function (dense accum): GB_Cdense_accumB__ne_uint32
// C+=b function (dense accum): GB_Cdense_accumb__ne_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_uint32
// C=scalar+B GB_bind1st__ne_uint32
// C=scalar+B' GB_bind1st_tran__ne_uint32
// C=A+scalar GB_bind2nd__ne_uint32
// C=A'+scalar GB_bind2nd_tran__ne_uint32
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
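// Hypothetical expansion sketch: inside the included templates, a statement such as
// GB_BINOP (GB_CX (p), aij, bij, i, j) expands to: Cx [p] = (aij != bij) ;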
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ne_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ne_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ne_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ne_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ne_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ne_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ne_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ne_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ne_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB_bind1st_tran__ne_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB_bind2nd_tran__ne_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
assumes_messages_attr.c
|
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -fopenmp-version=51 -std=c99 -fms-extensions -fdouble-square-bracket-attributes -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -fopenmp-version=51 -std=c99 -fms-extensions -fdouble-square-bracket-attributes -Wno-pragma-pack %s
[[omp::directive(assumes)]]; // expected-error {{expected at least one 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism' clause for '#pragma omp assumes'}}
[[omp::directive(begin)]]; // expected-error {{expected an OpenMP directive}}
[[omp::directive(begin assumes)]]; // expected-error {{expected at least one 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism' clause for '#pragma omp begin assumes'}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes foobar)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes foobar)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(begin assumes foobar(foo 2 baz))]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(assumes foobar(foo 2 baz))]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes no_openmp(1))]]; // expected-warning {{'no_openmp' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(begin assumes no_openmp(1 2 3))]]; // expected-warning {{'no_openmp' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes no_openmp(1))]];
[[omp::directive(assumes foobar no_openmp bazbaz)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}} expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes foobar no_openmp bazbaz)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}} expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(begin assumes foobar(foo 2 baz) no_openmp bazbaz(foo 2 baz))]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(assumes foobar(foo 2 baz) no_openmp bazbaz(foo 2 baz))]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes no_openmp foobar no_openmp)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes no_openmp foobar no_openmp)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes holds(1, 2 3))]];
[[omp::directive(begin assumes holds(1, 2 3))]];
[[omp::directive(end assumes)]];
[[omp::directive(assumes absent(1, 2 3))]];
[[omp::directive(begin assumes absent(1, 2 3))]];
[[omp::directive(end assumes)]];
[[omp::directive(assumes contains(1, 2 3))]];
[[omp::directive(begin assumes contains(1, 2 3))]];
[[omp::directive(end assumes)]];
[[omp::directive(assumes ext)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes ext)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes ext_123(not allowed))]]; // expected-warning {{'ext_123' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(begin assumes ext_123(not allowed))]]; // expected-warning {{'ext_123' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes)]];
[[omp::directive(end assumes)]]; // expected-error {{'#pragma omp end assumes' with no matching '#pragma omp begin assumes'}}
// TODO: we should emit a warning at least.
[[omp::directive(begin assumes ext_abc)]];
|
GB_binop__times_fc64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc64)
// A*D function (colscale): GB (_AxD__times_fc64)
// D*A function (rowscale): GB (_DxB__times_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc64)
// C=scalar+B GB (_bind1st__times_fc64)
// C=scalar+B' GB (_bind1st_tran__times_fc64)
// C=A+scalar GB (_bind2nd__times_fc64)
// C=A'+scalar GB (_bind2nd_tran__times_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_mul (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC64 || GxB_NO_TIMES_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_mul (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_mul (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__times_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lu_single.c
|
/*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - LU
This benchmark is an OpenMP C version of the NPB LU code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to [email protected]
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: S. Weeratunga
V. Venkatakrishnan
E. Barszcz
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
//#include "npb-C.h"
/*
NAS Parallel Benchmarks 2.3 OpenMP C Versions
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
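/* Hypothetical usage sketch of the dcomplex macros above; note they evaluate
   their arguments more than once, so side-effect-free operands are assumed. */
#if 0
dcomplex a = {1.0, 2.0}, b = {3.0, -1.0}, c, d;
cmul(c, a, b);    /* c = a*b  = (5.0, 5.0) */
crmul(d, c, 0.5); /* d = 0.5*c = (2.5, 2.5) */
#endif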
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/* global variables */
//#include "applu.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'A'
#endif
#if CLASS == 'S'
/* CLASS = S */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
/* full problem size */
#define ISIZ1 12
#define ISIZ2 12
#define ISIZ3 12
/* number of iterations and how often to print the norm */
#define ITMAX_DEFAULT 50
#define INORM_DEFAULT 50
#define DT_DEFAULT 0.5
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'W'
/* CLASS = W */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
/* full problem size */
#define ISIZ1 33
#define ISIZ2 33
#define ISIZ3 33
/* number of iterations and how often to print the norm */
#define ITMAX_DEFAULT 300
#define INORM_DEFAULT 300
#define DT_DEFAULT 1.5e-3
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'A'
/* CLASS = A */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
/* full problem size */
#define ISIZ1 64
#define ISIZ2 64
#define ISIZ3 64
/* number of iterations and how often to print the norm */
#define ITMAX_DEFAULT 250
#define INORM_DEFAULT 250
#define DT_DEFAULT 2.0
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'B'
/* CLASS = B */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
/* full problem size */
#define ISIZ1 102
#define ISIZ2 102
#define ISIZ3 102
/* number of iterations and how often to print the norm */
#define ITMAX_DEFAULT 250
#define INORM_DEFAULT 250
#define DT_DEFAULT 2.0
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'C'
/* CLASS = C */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
/* full problem size */
#define ISIZ1 162
#define ISIZ2 162
#define ISIZ3 162
/* number of iterations and how often to print the norm */
#define ITMAX_DEFAULT 250
#define INORM_DEFAULT 250
#define DT_DEFAULT 2.0
#define CONVERTDOUBLE FALSE
#endif
/*--------------------------------------------------------------------
c parameters which can be overridden in runtime config file
c isiz1,isiz2,isiz3 give the maximum size
c ipr = 1 to print out verbose information
c omega = 2.0 is correct for all classes
c tolrsd gives the tolerance levels for steady-state residuals
c-------------------------------------------------------------------*/
#define IPR_DEFAULT 1
#define OMEGA_DEFAULT 1.2
#define TOLRSD1_DEF 1.0e-8
#define TOLRSD2_DEF 1.0e-8
#define TOLRSD3_DEF 1.0e-8
#define TOLRSD4_DEF 1.0e-8
#define TOLRSD5_DEF 1.0e-8
#define C1 1.40e+00
#define C2 0.40e+00
#define C3 1.00e-01
#define C4 1.00e+00
#define C5 1.40e+00
/*--------------------------------------------------------------------
c grid
c-------------------------------------------------------------------*/
/* common /cgcon/ */
static int nx, ny, nz;
static int nx0, ny0, nz0;
static int ist, iend;
static int jst, jend;
static int ii1, ii2;
static int ji1, ji2;
static int ki1, ki2;
static double dxi, deta, dzeta;
static double tx1, tx2, tx3;
static double ty1, ty2, ty3;
static double tz1, tz2, tz3;
/*--------------------------------------------------------------------
c dissipation
c-------------------------------------------------------------------*/
/* common /disp/ */
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp;
#define COMPILETIME "28 Oct 2014"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O2"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
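/* flag[i] implements a one-slot producer/consumer handshake between
   threads that own adjacent i-planes in the pipelined triangular
   solves blts and buts below */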
#endif /* _OPENMP */
/* function declarations */
static void blts (int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
char *cclass, boolean *verified);
/*--------------------------------------------------------------------
c field variables and residuals
c to improve cache performance, second two dimensions padded by 1
c for even number sizes only.
c Note: the corresponding array (called "v") in routines blts, buts,
c and l2norm is similarly padded
c-------------------------------------------------------------------*/
/* common /cvar/ */
static double u[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5];
static double rsd[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5];
static double frct[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5];
static double flux[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5];
/*--------------------------------------------------------------------
c output control parameters
c-------------------------------------------------------------------*/
/* common /cprcon/ */
static int ipr, inorm;
/*--------------------------------------------------------------------
c newton-raphson iteration control parameters
c-------------------------------------------------------------------*/
/* common /ctscon/ */
static int itmax, invert;
static double dt, omega, tolrsd[5], rsdnm[5], errnm[5], frc, ttotal;
/* common /cjac/ */
static double a[ISIZ1][ISIZ2][5][5];
static double b[ISIZ1][ISIZ2][5][5];
static double c[ISIZ1][ISIZ2][5][5];
static double d[ISIZ1][ISIZ2][5][5];
/*--------------------------------------------------------------------
c coefficients of the exact solution
c-------------------------------------------------------------------*/
/* common /cexact/ */
static double ce[5][13];
/*--------------------------------------------------------------------
c multi-processor common blocks
c-------------------------------------------------------------------*/
/* common /timer/ */
static double maxtime;
/*--------------------------------------------------------------------
c end of include file
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
program applu
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
/*--------------------------------------------------------------------
c
c driver for the performance evaluation of the solver for
c five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
char cclass;
boolean verified;
double mflops;
int nthreads = 1;
/*--------------------------------------------------------------------
c read input data
--------------------------------------------------------------------*/
read_input();
/*--------------------------------------------------------------------
c set up domain sizes
--------------------------------------------------------------------*/
domain();
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
setcoeff();
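/*--------------------------------------------------------------------
c the initialization routines below contain orphaned "omp for"
c worksharing constructs (see erhs), so they are called from inside
c a single enclosing parallel region
--------------------------------------------------------------------*/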
#pragma omp parallel
{
/*--------------------------------------------------------------------
c set the boundary values for dependent variables
--------------------------------------------------------------------*/
setbv();
/*--------------------------------------------------------------------
c set the initial values for dependent variables
--------------------------------------------------------------------*/
setiv();
/*--------------------------------------------------------------------
c compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
erhs();
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
}
/*--------------------------------------------------------------------
c perform the SSOR iterations
--------------------------------------------------------------------*/
ssor();
/*--------------------------------------------------------------------
c compute the solution error
--------------------------------------------------------------------*/
error();
/*--------------------------------------------------------------------
c compute the surface integral
--------------------------------------------------------------------*/
pintgr();
/*--------------------------------------------------------------------
c verification test
--------------------------------------------------------------------*/
verify ( rsdnm, errnm, frc, &cclass, &verified );
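/* Mflop/s rate from the NPB operation-count model for LU */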
mflops = (double)itmax*(1984.77*(double)nx0
*(double)ny0
*(double)nz0
-10923.3*pow2((double)( nx0+ny0+nz0 )/3.0)
+27770.9* (double)( nx0+ny0+nz0 )/3.0
-144010.0)
/ (maxtime*1000000.0);
c_print_results("LU", cclass, nx0,
ny0, nz0, itmax, nthreads,
maxtime, mflops, " floating point", verified,
NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
"(none)");
  return 0;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void blts (int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block lower triangular solution:
c
c v <-- ( L-inv ) * v
c
--------------------------------------------------------------------*/
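/*--------------------------------------------------------------------
c at each (i,j) the 5x5 diagonal block d is factored in place in tmat
c by Gaussian elimination without pivoting, applying the same row
c operations to the right-hand side v; back substitution then yields
c the updated v(i,j,k,*)
--------------------------------------------------------------------*/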
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]
+ ldz[i][j][m][1] * v[i][j][k-1][1]
+ ldz[i][j][m][2] * v[i][j][k-1][2]
+ ldz[i][j][m][3] * v[i][j][k-1][3]
+ ldz[i][j][m][4] * v[i][j][k-1][4] );
}
}
}
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
#if defined(_OPENMP)
if (i != ist) {
while (flag[i-1] == 0) {
#pragma omp flush(flag)
;
}
}
if (i != iend) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
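/* pipeline handshake: wait until the predecessor has published plane
   i-1 (flag[i-1] == 1) and until the successor has consumed the
   previous update of plane i (flag[i] == 0) before sweeping this
   i-plane */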
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldy[i][j][m][0] * v[i][j-1][k][0]
+ ldx[i][j][m][0] * v[i-1][j][k][0]
+ ldy[i][j][m][1] * v[i][j-1][k][1]
+ ldx[i][j][m][1] * v[i-1][j][k][1]
+ ldy[i][j][m][2] * v[i][j-1][k][2]
+ ldx[i][j][m][2] * v[i-1][j][k][2]
+ ldy[i][j][m][3] * v[i][j-1][k][3]
+ ldx[i][j][m][3] * v[i-1][j][k][3]
+ ldy[i][j][m][4] * v[i][j-1][k][4]
+ ldx[i][j][m][4] * v[i-1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
c
c forward elimination
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
v[i][j][k][1] = v[i][j][k][1]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][0] * tmp;
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
v[i][j][k][4] = v[i][j][k][4]
/ tmat[4][4];
v[i][j][k][3] = v[i][j][k][3]
- tmat[3][4] * v[i][j][k][4];
v[i][j][k][3] = v[i][j][k][3]
/ tmat[3][3];
v[i][j][k][2] = v[i][j][k][2]
- tmat[2][3] * v[i][j][k][3]
- tmat[2][4] * v[i][j][k][4];
v[i][j][k][2] = v[i][j][k][2]
/ tmat[2][2];
v[i][j][k][1] = v[i][j][k][1]
- tmat[1][2] * v[i][j][k][2]
- tmat[1][3] * v[i][j][k][3]
- tmat[1][4] * v[i][j][k][4];
v[i][j][k][1] = v[i][j][k][1]
/ tmat[1][1];
v[i][j][k][0] = v[i][j][k][0]
- tmat[0][1] * v[i][j][k][1]
- tmat[0][2] * v[i][j][k][2]
- tmat[0][3] * v[i][j][k][3]
- tmat[0][4] * v[i][j][k][4];
v[i][j][k][0] = v[i][j][k][0]
/ tmat[0][0];
}
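/* publish: mark plane i-1 as consumed and plane i as produced; the
   flush makes both updates visible to the neighbouring threads */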
#if defined(_OPENMP)
if (i != ist) flag[i-1] = 0;
if (i != iend) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void buts(int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block upper triangular solution:
c
c v <-- ( U-inv ) * v
c
--------------------------------------------------------------------*/
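/*--------------------------------------------------------------------
c buts mirrors blts but sweeps i and j in descending order: the
c neighbour contributions are accumulated in tv, the 5x5 diagonal
c block is solved by Gaussian elimination as in blts, and the
c resulting correction tv is subtracted from v
--------------------------------------------------------------------*/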
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] =
omega * ( udz[i][j][m][0] * v[i][j][k+1][0]
+ udz[i][j][m][1] * v[i][j][k+1][1]
+ udz[i][j][m][2] * v[i][j][k+1][2]
+ udz[i][j][m][3] * v[i][j][k+1][3]
+ udz[i][j][m][4] * v[i][j][k+1][4] );
}
}
}
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
#if defined(_OPENMP)
if (i != iend) {
while (flag[i+1] == 0) {
#pragma omp flush(flag)
;
}
}
if (i != ist) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] = tv[i][j][m]
+ omega * ( udy[i][j][m][0] * v[i][j+1][k][0]
+ udx[i][j][m][0] * v[i+1][j][k][0]
+ udy[i][j][m][1] * v[i][j+1][k][1]
+ udx[i][j][m][1] * v[i+1][j][k][1]
+ udy[i][j][m][2] * v[i][j+1][k][2]
+ udx[i][j][m][2] * v[i+1][j][k][2]
+ udy[i][j][m][3] * v[i][j+1][k][3]
+ udx[i][j][m][3] * v[i+1][j][k][3]
+ udy[i][j][m][4] * v[i][j+1][k][4]
+ udx[i][j][m][4] * v[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
tv[i][j][1] = tv[i][j][1]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][0] * tmp;
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
tv[i][j][4] = tv[i][j][4]
/ tmat[4][4];
tv[i][j][3] = tv[i][j][3]
- tmat[3][4] * tv[i][j][4];
tv[i][j][3] = tv[i][j][3]
/ tmat[3][3];
tv[i][j][2] = tv[i][j][2]
- tmat[2][3] * tv[i][j][3]
- tmat[2][4] * tv[i][j][4];
tv[i][j][2] = tv[i][j][2]
/ tmat[2][2];
tv[i][j][1] = tv[i][j][1]
- tmat[1][2] * tv[i][j][2]
- tmat[1][3] * tv[i][j][3]
- tmat[1][4] * tv[i][j][4];
tv[i][j][1] = tv[i][j][1]
/ tmat[1][1];
tv[i][j][0] = tv[i][j][0]
- tmat[0][1] * tv[i][j][1]
- tmat[0][2] * tv[i][j][2]
- tmat[0][3] * tv[i][j][3]
- tmat[0][4] * tv[i][j][4];
tv[i][j][0] = tv[i][j][0]
/ tmat[0][0];
v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
}
#if defined(_OPENMP)
if (i != iend) flag[i+1] = 0;
if (i != ist) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void domain(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
nx = nx0;
ny = ny0;
nz = nz0;
/*--------------------------------------------------------------------
c check the sub-domain size
--------------------------------------------------------------------*/
if ( nx < 4 || ny < 4 || nz < 4 ) {
printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
" TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
exit(1);
}
if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) {
printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
" CURRENTLY%4d%4d%4d\n", nx, ny, nz);
exit(1);
}
/*--------------------------------------------------------------------
c set up the start and end in i and j extents for all processors
--------------------------------------------------------------------*/
ist = 1;
iend = nx - 2;
jst = 1;
jend = ny - 2;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {
/*--------------------------------------------------------------------
c
c compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
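/*--------------------------------------------------------------------
c the exact solution is evaluated into rsd, its flux differences and
c artificial dissipation are accumulated into frct, and frct then
c serves as the forcing term that makes the exact solution satisfy
c the discrete equations (a manufactured-solution setup)
--------------------------------------------------------------------*/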
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double dsspm;
double xi, eta, zeta;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
dsspm = dssp;
#pragma omp for
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = 0.0;
}
}
}
}
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
xi = ( (double)(iglob) ) / ( nx0 - 1 );
for (j = 0; j < ny; j++) {
jglob = j;
eta = ( (double)(jglob) ) / ( ny0 - 1 );
for (k = 0; k < nz; k++) {
zeta = ( (double)(k) ) / ( nz - 1 );
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k < nz - 1; k++) {
flux[i][j][k][0] = rsd[i][j][k][1];
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
for (i = ist; i <= L2; i++) {
tmp = 1.0 / rsd[i][j][k][0];
u21i = tmp * rsd[i][j][k][1];
u31i = tmp * rsd[i][j][k][2];
u41i = tmp * rsd[i][j][k][3];
u51i = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i-1][j][k][0];
u21im1 = tmp * rsd[i-1][j][k][1];
u31im1 = tmp * rsd[i-1][j][k][2];
u41im1 = tmp * rsd[i-1][j][k][3];
u51im1 = tmp * rsd[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 *
( u21i - u21im1 );
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dx1 * tx1 * ( rsd[i-1][j][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i+1][j][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( rsd[i-1][j][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i+1][j][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( rsd[i-1][j][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i+1][j][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( rsd[i-1][j][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i+1][j][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( rsd[i-1][j][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
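/*--------------------------------------------------------------------
c interior points use the standard 5-point stencil (1, -4, 6, -4, 1);
c the two points nearest each boundary use one-sided variants so that
c no value outside the domain is referenced (the eta- and
c zeta-direction blocks below are treated the same way)
--------------------------------------------------------------------*/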
for (m = 0; m < 5; m++) {
frct[1][j][k][m] = frct[1][j][k][m]
- dsspm * ( + 5.0 * rsd[1][j][k][m]
- 4.0 * rsd[2][j][k][m]
+ rsd[3][j][k][m] );
frct[2][j][k][m] = frct[2][j][k][m]
- dsspm * ( - 4.0 * rsd[1][j][k][m]
+ 6.0 * rsd[2][j][k][m]
- 4.0 * rsd[3][j][k][m]
+ rsd[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <= iend1; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i-2][j][k][m]
- 4.0 * rsd[i-1][j][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i+1][j][k][m]
+ rsd[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
- dsspm * ( rsd[nx-5][j][k][m]
- 4.0 * rsd[nx-4][j][k][m]
+ 6.0 * rsd[nx-3][j][k][m]
- 4.0 * rsd[nx-2][j][k][m] );
frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
- dsspm * ( rsd[nx-4][j][k][m]
- 4.0 * rsd[nx-3][j][k][m]
+ 5.0 * rsd[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = rsd[i][j][k][2];
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
for (j = jst; j <= L2; j++) {
tmp = 1.0 / rsd[i][j][k][0];
u21j = tmp * rsd[i][j][k][1];
u31j = tmp * rsd[i][j][k][2];
u41j = tmp * rsd[i][j][k][3];
u51j = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j-1][k][0];
u21jm1 = tmp * rsd[i][j-1][k][1];
u31jm1 = tmp * rsd[i][j-1][k][2];
u41jm1 = tmp * rsd[i][j-1][k][3];
u51jm1 = tmp * rsd[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 *
( u31j - u31jm1 );
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dy1 * ty1 * ( rsd[i][j-1][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j+1][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( rsd[i][j-1][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j+1][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( rsd[i][j-1][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j+1][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( rsd[i][j-1][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j+1][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( rsd[i][j-1][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][1][k][m] = frct[i][1][k][m]
- dsspm * ( + 5.0 * rsd[i][1][k][m]
- 4.0 * rsd[i][2][k][m]
+ rsd[i][3][k][m] );
frct[i][2][k][m] = frct[i][2][k][m]
- dsspm * ( - 4.0 * rsd[i][1][k][m]
+ 6.0 * rsd[i][2][k][m]
- 4.0 * rsd[i][3][k][m]
+ rsd[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j-2][k][m]
- 4.0 * rsd[i][j-1][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j+1][k][m]
+ rsd[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][ny-3][k][m] = frct[i][ny-3][k][m]
- dsspm * ( rsd[i][ny-5][k][m]
- 4.0 * rsd[i][ny-4][k][m]
+ 6.0 * rsd[i][ny-3][k][m]
- 4.0 * rsd[i][ny-2][k][m] );
frct[i][ny-2][k][m] = frct[i][ny-2][k][m]
- dsspm * ( rsd[i][ny-4][k][m]
- 4.0 * rsd[i][ny-3][k][m]
+ 5.0 * rsd[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = rsd[i][j][k][3];
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / rsd[i][j][k][0];
u21k = tmp * rsd[i][j][k][1];
u31k = tmp * rsd[i][j][k][2];
u41k = tmp * rsd[i][j][k][3];
u51k = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j][k-1][0];
u21km1 = tmp * rsd[i][j][k-1][1];
u31km1 = tmp * rsd[i][j][k-1][2];
u41km1 = tmp * rsd[i][j][k-1][3];
u51km1 = tmp * rsd[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k
- u41km1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dz1 * tz1 * ( rsd[i][j][k+1][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j][k-1][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( rsd[i][j][k+1][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j][k-1][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( rsd[i][j][k+1][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j][k-1][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( rsd[i][j][k+1][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j][k-1][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( rsd[i][j][k+1][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j][k-1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][j][1][m] = frct[i][j][1][m]
- dsspm * ( + 5.0 * rsd[i][j][1][m]
- 4.0 * rsd[i][j][2][m]
+ rsd[i][j][3][m] );
frct[i][j][2][m] = frct[i][j][2][m]
- dsspm * ( - 4.0 * rsd[i][j][1][m]
+ 6.0 * rsd[i][j][2][m]
- 4.0 * rsd[i][j][3][m]
+ rsd[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j][k-2][m]
- 4.0 * rsd[i][j][k-1][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j][k+1][m]
+ rsd[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][j][nz-3][m] = frct[i][j][nz-3][m]
- dsspm * ( rsd[i][j][nz-5][m]
- 4.0 * rsd[i][j][nz-4][m]
+ 6.0 * rsd[i][j][nz-3][m]
- 4.0 * rsd[i][j][nz-2][m] );
frct[i][j][nz-2][m] = frct[i][j][nz-2][m]
- dsspm * ( rsd[i][j][nz-4][m]
- 4.0 * rsd[i][j][nz-3][m]
+ 5.0 * rsd[i][j][nz-2][m] );
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error(void) {
/*--------------------------------------------------------------------
c
c compute the solution error
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double tmp;
double u000ijk[5];
for (m = 0; m < 5; m++) {
errnm[m] = 0.0;
}
for (i = ist; i <= iend; i++) {
iglob = i;
for (j = jst; j <= jend; j++) {
jglob = j;
for (k = 1; k <= nz-2; k++) {
exact( iglob, jglob, k, u000ijk );
for (m = 0; m < 5; m++) {
tmp = ( u000ijk[m] - u[i][j][k][m] );
errnm[m] = errnm[m] + tmp *tmp;
}
}
}
}
for (m = 0; m < 5; m++) {
errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact( int i, int j, int k, double u000ijk[5] ) {
/*--------------------------------------------------------------------
c
c compute the exact solution at (i,j,k)
c
--------------------------------------------------------------------*/
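/*--------------------------------------------------------------------
c u000ijk(m) is a quartic polynomial in the normalized coordinates:
c u000ijk(m) = ce(m,0) + ce(m,1)*xi + ce(m,2)*eta + ce(m,3)*zeta
c            + ... + ce(m,10)*xi^4 + ce(m,11)*eta^4 + ce(m,12)*zeta^4
--------------------------------------------------------------------*/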
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int m;
double xi, eta, zeta;
xi = ((double)i) / (nx0 - 1);
eta = ((double)j) / (ny0 - 1);
zeta = ((double)k) / (nz - 1);
for (m = 0; m < 5; m++) {
u000ijk[m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c compute the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
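/*--------------------------------------------------------------------
c for one k-plane this fills the 5x5 jacobian blocks: d from u(i,j,k)
c (block diagonal) and a, b, c from the k-1, j-1 and i-1 neighbours
c (block sub-diagonals); these blocks are consumed by blts
--------------------------------------------------------------------*/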
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k-1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = - dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = - dt * tz2
* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
a[i][j][1][4] = 0.0;
a[i][j][2][0] = - dt * tz2
* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
a[i][j][2][1] = 0.0;
a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
a[i][j][2][4] = 0.0;
a[i][j][3][0] = - dt * tz2
* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
a[i][j][3][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
a[i][j][3][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[i][j][3][4] = - dt * tz2 * C2;
a[i][j][4][0] = - dt * tz2
* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
- C1 * ( u[i][j][k-1][4] * tmp1 ) )
* ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
- ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
- ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
- c1345 * tmp2 * u[i][j][k-1][4] );
a[i][j][4][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
a[i][j][4][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
a[i][j][4][3] = - dt * tz2
* ( C1 * ( u[i][j][k-1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
+ u[i][j][k-1][2]*u[i][j][k-1][2]
+ 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
a[i][j][4][4] = - dt * tz2
* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j-1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = - dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = - dt * ty2
* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = - dt * ty2
* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
b[i][j][2][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
b[i][j][2][4] = - dt * ty2 * C2;
b[i][j][3][0] = - dt * ty2
* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = - dt * ty2
* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
- C1 * ( u[i][j-1][k][4] * tmp1 ) )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
- ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
- ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
- c1345*tmp2*u[i][j-1][k][4] );
b[i][j][4][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
b[i][j][4][2] = - dt * ty2
* ( C1 * ( u[i][j-1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
+ 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
+ u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
b[i][j][4][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
b[i][j][4][4] = - dt * ty2
* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i-1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tx1 * dx1;
c[i][j][0][1] = - dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
c[i][j][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[i][j][1][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
c[i][j][1][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
c[i][j][1][4] = - dt * tx2 * C2;
c[i][j][2][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = - dt * tx2
* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
c[i][j][3][2] = 0.0;
c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = - dt * tx2
* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
- C1 * ( u[i-1][j][k][4] * tmp1 ) )
* ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
- c1345 * tmp2 * u[i-1][j][k][4] );
c[i][j][4][1] = - dt * tx2
* ( C1 * ( u[i-1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
+ u[i-1][j][k][2]*u[i-1][j][k][2]
+ u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
c[i][j][4][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
c[i][j][4][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
c[i][j][4][4] = - dt * tx2
* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c compute the upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
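/*--------------------------------------------------------------------
c counterpart of jacld: d is again the block diagonal, while a, b, c
c here couple the i+1, j+1 and k+1 neighbours (block super-diagonals)
c consumed by the backward sweep in buts
--------------------------------------------------------------------*/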
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
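/* under OpenMP the i and j loops below run in descending order,
   which keeps the static schedule's thread-to-plane mapping
   consistent with the backward sweep in buts */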
#pragma omp for nowait schedule(static)
#if defined(_OPENMP)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
#else
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#endif
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i+1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );
a[i][j][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );
a[i][j][1][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );
a[i][j][1][4] = dt * tx2 * C2 ;
a[i][j][2][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );
a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );
a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2
* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );
a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2
* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2
- C1 * ( u[i+1][j][k][4] * tmp1 ) )
* ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )
- c1345 * tmp2 * u[i+1][j][k][4] );
a[i][j][4][1] = dt * tx2
* ( C1 * ( u[i+1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]
+ u[i+1][j][k][2]*u[i+1][j][k][2]
+ u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];
a[i][j][4][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];
a[i][j][4][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];
a[i][j][4][4] = dt * tx2
* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j+1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2
* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );
b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2
* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );
b[i][j][2][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );
b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );
b[i][j][2][4] = dt * ty2 * C2;
b[i][j][3][0] = dt * ty2
* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );
b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2
* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2
- C1 * ( u[i][j+1][k][4] * tmp1 ) )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )
- ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )
- ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )
- c1345*tmp2*u[i][j+1][k][4] );
b[i][j][4][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];
b[i][j][4][2] = dt * ty2
* ( C1 * ( u[i][j+1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j+1][k][1]*u[i][j+1][k][1]
+ 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]
+ u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];
b[i][j][4][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];
b[i][j][4][4] = dt * ty2
* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k+1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2
* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );
c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2
* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2
* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );
c[i][j][3][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );
c[i][j][3][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );
c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * C2;
c[i][j][4][0] = dt * tz2
* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2
- C1 * ( u[i][j][k+1][4] * tmp1 ) )
* ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )
- ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )
- c1345 * tmp2 * u[i][j][k+1][4] );
c[i][j][4][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];
c[i][j][4][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
c[i][j][4][3] = dt * tz2
* ( C1 * ( u[i][j][k+1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
+ u[i][j][k+1][2]*u[i][j][k+1][2]
+ 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
c[i][j][4][4] = dt * tz2
* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
/*--------------------------------------------------------------------
c To improve cache performance, the last two dimensions are padded by 1
c for even sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]) {
/*--------------------------------------------------------------------
c to compute the l2-norm of vector v.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;
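/*--------------------------------------------------------------------
c each thread accumulates private partial sums (sum0..sum4) in the
c nowait loop below, merges them in the critical section, and a single
c thread then takes the square roots, giving RMS norms over the
c (nx0-2)*(ny0-2)*(nz0-2) interior points
--------------------------------------------------------------------*/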
#pragma omp single
for (m = 0; m < 5; m++) {
sum[m] = 0.0;
}
#pragma omp for nowait
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz0-2; k++) {
sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
}
}
}
#pragma omp critical
{
sum[0] += sum0;
sum[1] += sum1;
sum[2] += sum2;
sum[3] += sum3;
sum[4] += sum4;
}
#pragma omp barrier
#pragma omp single
for (m = 0; m < 5; m++) {
sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pintgr(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k;
int ibeg, ifin, ifin1;
int jbeg, jfin, jfin1;
int iglob, iglob1, iglob2;
int jglob, jglob1, jglob2;
double phi1[ISIZ2+2][ISIZ3+2]; /* phi1(0:isiz2+1,0:isiz3+1) */
double phi2[ISIZ2+2][ISIZ3+2]; /* phi2(0:isiz2+1,0:isiz3+1) */
double frc1, frc2, frc3;
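/*--------------------------------------------------------------------
c phi1/phi2 hold the pressure-like quantity C2*(E - 0.5*|m|^2/rho) on
c two opposite faces of the sub-domain; frc1, frc2 and frc3 are
c trapezoid-rule surface integrals over the three face pairs, with the
c 0.25 corner weight factored out into the final expression for frc
--------------------------------------------------------------------*/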
/*--------------------------------------------------------------------
c set up the sub-domains for integration in each processor
--------------------------------------------------------------------*/
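/*--------------------------------------------------------------------
c with the whole grid on one process, iglob1/iglob2 span [0, nx-1] and
c jglob1/jglob2 span [0, ny-1]; the guarded assignments below (kept,
c it appears, from a distributed-memory original) then reduce to
c ibeg = ii1, ifin = ii2, jbeg = ji1, jfin = ji2
--------------------------------------------------------------------*/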
ibeg = nx;
ifin = 0;
iglob1 = -1;
iglob2 = nx-1;
if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
jbeg = ny;
jfin = -1;
jglob1 = 0;
jglob2 = ny-1;
if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
ifin1 = ifin;
jfin1 = jfin;
if (ifin1 == ii2) ifin1 = ifin -1;
if (jfin1 == ji2) jfin1 = jfin -1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (j = jbeg; j <= jfin; j++) {
jglob = j;
k = ki1;
phi1[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
k = ki2;
phi2[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
}
}
frc1 = 0.0;
for (i = ibeg; i <= ifin1; i++) {
for (j = jbeg; j <= jfin1; j++) {
frc1 = frc1 + ( phi1[i][j]
+ phi1[i+1][j]
+ phi1[i][j+1]
+ phi1[i+1][j+1]
+ phi2[i][j]
+ phi2[i+1][j]
+ phi2[i][j+1]
+ phi2[i+1][j+1] );
}
}
frc1 = dxi * deta * frc1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
jglob = jbeg;
if (jglob == ji1) {
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (k = ki1; k <= ki2; k++) {
phi1[i][k] = C2*( u[i][jbeg][k][4]
- 0.50 * ( pow2(u[i][jbeg][k][1])
+ pow2(u[i][jbeg][k][2])
+ pow2(u[i][jbeg][k][3]) )
/ u[i][jbeg][k][0] );
}
}
}
jglob = jfin;
if (jglob == ji2) {
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (k = ki1; k <= ki2; k++) {
phi2[i][k] = C2*( u[i][jfin][k][4]
- 0.50 * ( pow2(u[i][jfin][k][1])
+ pow2(u[i][jfin][k][2])
+ pow2(u[i][jfin][k][3]) )
/ u[i][jfin][k][0] );
}
}
}
frc2 = 0.0;
for (i = ibeg; i <= ifin1; i++) {
for (k = ki1; k <= ki2-1; k++) {
frc2 = frc2 + ( phi1[i][k]
+ phi1[i+1][k]
+ phi1[i][k+1]
+ phi1[i+1][k+1]
+ phi2[i][k]
+ phi2[i+1][k]
+ phi2[i][k+1]
+ phi2[i+1][k+1] );
}
}
frc2 = dxi * dzeta * frc2;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
iglob = ibeg;
if (iglob == ii1) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi1[j][k] = C2*( u[ibeg][j][k][4]
- 0.50 * ( pow2(u[ibeg][j][k][1])
+ pow2(u[ibeg][j][k][2])
+ pow2(u[ibeg][j][k][3]) )
/ u[ibeg][j][k][0] );
}
}
}
iglob = ifin;
if (iglob == ii2) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi2[j][k] = C2*( u[ifin][j][k][4]
- 0.50 * ( pow2(u[ifin][j][k][1])
+ pow2(u[ifin][j][k][2])
+ pow2(u[ifin][j][k][3]) )
/ u[ifin][j][k][0] );
}
}
}
frc3 = 0.0;
for (j = jbeg; j <= jfin1; j++) {
for (k = ki1; k <= ki2-1; k++) {
frc3 = frc3 + ( phi1[j][k]
+ phi1[j+1][k]
+ phi1[j][k+1]
+ phi1[j+1][k+1]
+ phi2[j][k]
+ phi2[j+1][k]
+ phi2[j][k+1]
+ phi2[j+1][k+1] );
}
}
frc3 = deta * dzeta * frc3;
frc = 0.25 * ( frc1 + frc2 + frc3 );
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void read_input(void) {
FILE *fp;
/*--------------------------------------------------------------------
c if input file does not exist, it uses defaults
c ipr = 1 for detailed progress output
c inorm = how often the norm is printed (once every inorm iterations)
c itmax = number of pseudo time steps
c dt = time step
c omega = over-relaxation factor for SSOR
c tolrsd = steady state residual tolerance levels
c nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
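/*--------------------------------------------------------------------
c illustrative sketch of the inputlu.data layout implied by the parsing
c below (an assumption, not taken from a distributed file): each value
c line is preceded by two comment lines and is consumed up to its newline
c
c <comment line>
c <comment line>
c <ipr> <inorm>
c <comment line>
c <comment line>
c <itmax>
c ... likewise for dt, omega, the five tolrsd values, and nx ny nz
--------------------------------------------------------------------*/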
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - LU Benchmark\n\n");
fp = fopen("inputlu.data", "r");
if (fp != NULL) {
printf(" Reading from input file inputlu.data\n");
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%d%d", &ipr, &inorm);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%d", &itmax);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%lf", &dt);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%lf", &omega);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%lf%lf%lf%lf%lf",
&tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
while(fgetc(fp) != '\n');
while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n');
fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
while(fgetc(fp) != '\n');
fclose(fp);
} else {
ipr = IPR_DEFAULT;
inorm = INORM_DEFAULT;
itmax = ITMAX_DEFAULT;
dt = DT_DEFAULT;
omega = OMEGA_DEFAULT;
tolrsd[0] = TOLRSD1_DEF;
tolrsd[1] = TOLRSD2_DEF;
tolrsd[2] = TOLRSD3_DEF;
tolrsd[3] = TOLRSD4_DEF;
tolrsd[4] = TOLRSD5_DEF;
nx0 = ISIZ1;
ny0 = ISIZ2;
nz0 = ISIZ3;
}
/*--------------------------------------------------------------------
c check problem size
--------------------------------------------------------------------*/
if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
printf(" PROBLEM SIZE IS TOO SMALL - \n"
" SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
exit(1);
}
if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
printf(" PROBLEM SIZE IS TOO LARGE - \n"
" NX, NY AND NZ SHOULD BE EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
exit(1);
}
printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
printf(" Iterations: %3d\n", itmax);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs(void) {
/*--------------------------------------------------------------------
c compute the right hand sides
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
#pragma omp for
for (i = 0; i <= nx-1; i++) {
for (j = 0; j <= ny-1; j++) {
for (k = 0; k <= nz-1; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = - frct[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][1];
u21 = u[i][j][k][1] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 *
( u[i][j][k][4] - q );
flux[i][j][k][2] = u[i][j][k][2] * u21;
flux[i][j][k][3] = u[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
L2 = nx-1;
for (i = ist; i <= L2; i++) {
tmp = 1.0 / u[i][j][k][0];
u21i = tmp * u[i][j][k][1];
u31i = tmp * u[i][j][k][2];
u41i = tmp * u[i][j][k][3];
u51i = tmp * u[i][j][k][4];
tmp = 1.0 / u[i-1][j][k][0];
u21im1 = tmp * u[i-1][j][k][1];
u31im1 = tmp * u[i-1][j][k][2];
u41im1 = tmp * u[i-1][j][k][3];
u51im1 = tmp * u[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
- ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
+ (1.0/6.0)
* tx3 * ( pow2(u21i) - pow2(u21im1) )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dx1 * tx1 * ( u[i-1][j][k][0]
- 2.0 * u[i][j][k][0]
+ u[i+1][j][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( u[i-1][j][k][1]
- 2.0 * u[i][j][k][1]
+ u[i+1][j][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( u[i-1][j][k][2]
- 2.0 * u[i][j][k][2]
+ u[i+1][j][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( u[i-1][j][k][3]
- 2.0 * u[i][j][k][3]
+ u[i+1][j][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( u[i-1][j][k][4]
- 2.0 * u[i][j][k][4]
+ u[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
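/*--------------------------------------------------------------------
c boundary-modified stencil: (5,-4,1) at the first interior point,
c (-4,6,-4,1) at the second, (1,-4,6,-4,1) in the interior, and the
c mirror images at the far end
--------------------------------------------------------------------*/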
for (m = 0; m < 5; m++) {
rsd[1][j][k][m] = rsd[1][j][k][m]
- dssp * ( + 5.0 * u[1][j][k][m]
- 4.0 * u[2][j][k][m]
+ u[3][j][k][m] );
rsd[2][j][k][m] = rsd[2][j][k][m]
- dssp * ( - 4.0 * u[1][j][k][m]
+ 6.0 * u[2][j][k][m]
- 4.0 * u[3][j][k][m]
+ u[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <= iend1; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i-2][j][k][m]
- 4.0 * u[i-1][j][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i+1][j][k][m]
+ u[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
- dssp * ( u[nx-5][j][k][m]
- 4.0 * u[nx-4][j][k][m]
+ 6.0 * u[nx-3][j][k][m]
- 4.0 * u[nx-2][j][k][m] );
rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
- dssp * ( u[nx-4][j][k][m]
- 4.0 * u[nx-3][j][k][m]
+ 5.0 * u[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][2];
u31 = u[i][j][k][2] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u31;
flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][3] = u[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
L2 = ny-1;
for (j = jst; j <= L2; j++) {
tmp = 1.0 / u[i][j][k][0];
u21j = tmp * u[i][j][k][1];
u31j = tmp * u[i][j][k][2];
u41j = tmp * u[i][j][k][3];
u51j = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j-1][k][0];
u21jm1 = tmp * u[i][j-1][k][1];
u31jm1 = tmp * u[i][j-1][k][2];
u41jm1 = tmp * u[i][j-1][k][3];
u51jm1 = tmp * u[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
- ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
+ (1.0/6.0)
* ty3 * ( pow2(u31j) - pow2(u31jm1) )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dy1 * ty1 * ( u[i][j-1][k][0]
- 2.0 * u[i][j][k][0]
+ u[i][j+1][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( u[i][j-1][k][1]
- 2.0 * u[i][j][k][1]
+ u[i][j+1][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( u[i][j-1][k][2]
- 2.0 * u[i][j][k][2]
+ u[i][j+1][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( u[i][j-1][k][3]
- 2.0 * u[i][j][k][3]
+ u[i][j+1][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( u[i][j-1][k][4]
- 2.0 * u[i][j][k][4]
+ u[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][1][k][m] = rsd[i][1][k][m]
- dssp * ( + 5.0 * u[i][1][k][m]
- 4.0 * u[i][2][k][m]
+ u[i][3][k][m] );
rsd[i][2][k][m] = rsd[i][2][k][m]
- dssp * ( - 4.0 * u[i][1][k][m]
+ 6.0 * u[i][2][k][m]
- 4.0 * u[i][3][k][m]
+ u[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j-2][k][m]
- 4.0 * u[i][j-1][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j+1][k][m]
+ u[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
- dssp * ( u[i][ny-5][k][m]
- 4.0 * u[i][ny-4][k][m]
+ 6.0 * u[i][ny-3][k][m]
- 4.0 * u[i][ny-2][k][m] );
rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
- dssp * ( u[i][ny-4][k][m]
- 4.0 * u[i][ny-3][k][m]
+ 5.0 * u[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = u[i][j][k][3];
u41 = u[i][j][k][3] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u41;
flux[i][j][k][2] = u[i][j][k][2] * u41;
flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / u[i][j][k][0];
u21k = tmp * u[i][j][k][1];
u31k = tmp * u[i][j][k][2];
u41k = tmp * u[i][j][k][3];
u51k = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j][k-1][0];
u21km1 = tmp * u[i][j][k-1][1];
u31km1 = tmp * u[i][j][k-1][2];
u41km1 = tmp * u[i][j][k-1][3];
u51km1 = tmp * u[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
- ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
+ (1.0/6.0)
* tz3 * ( pow2(u41k) - pow2(u41km1) )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dz1 * tz1 * ( u[i][j][k-1][0]
- 2.0 * u[i][j][k][0]
+ u[i][j][k+1][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( u[i][j][k-1][1]
- 2.0 * u[i][j][k][1]
+ u[i][j][k+1][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( u[i][j][k-1][2]
- 2.0 * u[i][j][k][2]
+ u[i][j][k+1][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( u[i][j][k-1][3]
- 2.0 * u[i][j][k][3]
+ u[i][j][k+1][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( u[i][j][k-1][4]
- 2.0 * u[i][j][k][4]
+ u[i][j][k+1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][j][1][m] = rsd[i][j][1][m]
- dssp * ( + 5.0 * u[i][j][1][m]
- 4.0 * u[i][j][2][m]
+ u[i][j][3][m] );
rsd[i][j][2][m] = rsd[i][j][2][m]
- dssp * ( - 4.0 * u[i][j][1][m]
+ 6.0 * u[i][j][2][m]
- 4.0 * u[i][j][3][m]
+ u[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j][k-2][m]
- 4.0 * u[i][j][k-1][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j][k+1][m]
+ u[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
- dssp * ( u[i][j][nz-5][m]
- 4.0 * u[i][j][nz-4][m]
+ 6.0 * u[i][j][nz-3][m]
- 4.0 * u[i][j][nz-2][m] );
rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
- dssp * ( u[i][j][nz-4][m]
- 4.0 * u[i][j][nz-3][m]
+ 5.0 * u[i][j][nz-2][m] );
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setbv(void) {
/*--------------------------------------------------------------------
c set the boundary values of dependent variables
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k;
int iglob, jglob;
/*--------------------------------------------------------------------
c set the dependent variable values along the top and bottom faces
--------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
for (j = 0; j < ny; j++) {
jglob = j;
exact( iglob, jglob, 0, &u[i][j][0][0] );
exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] );
}
}
/*--------------------------------------------------------------------
c set the dependent variable values along north and south faces
--------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
for (k = 0; k < nz; k++) {
exact( iglob, 0, k, &u[i][0][k][0] );
}
}
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
for (k = 0; k < nz; k++) {
exact( iglob, ny0-1, k, &u[i][ny-1][k][0] );
}
}
/*--------------------------------------------------------------------
c set the dependent variable values along east and west faces
--------------------------------------------------------------------*/
#pragma omp for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 0; k < nz; k++) {
exact( 0, jglob, k, &u[0][j][k][0] );
}
}
#pragma omp for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 0; k < nz; k++) {
exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setcoeff(void) {
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
dxi = 1.0 / ( nx0 - 1 );
deta = 1.0 / ( ny0 - 1 );
dzeta = 1.0 / ( nz0 - 1 );
tx1 = 1.0 / ( dxi * dxi );
tx2 = 1.0 / ( 2.0 * dxi );
tx3 = 1.0 / dxi;
ty1 = 1.0 / ( deta * deta );
ty2 = 1.0 / ( 2.0 * deta );
ty3 = 1.0 / deta;
tz1 = 1.0 / ( dzeta * dzeta );
tz2 = 1.0 / ( 2.0 * dzeta );
tz3 = 1.0 / dzeta;
ii1 = 1;
ii2 = nx0 - 2;
ji1 = 1;
ji2 = ny0 - 3;
ki1 = 2;
ki2 = nz0 - 2;
/*--------------------------------------------------------------------
c diffusion coefficients
--------------------------------------------------------------------*/
dx1 = 0.75;
dx2 = dx1;
dx3 = dx1;
dx4 = dx1;
dx5 = dx1;
dy1 = 0.75;
dy2 = dy1;
dy3 = dy1;
dy4 = dy1;
dy5 = dy1;
dz1 = 1.00;
dz2 = dz1;
dz3 = dz1;
dz4 = dz1;
dz5 = dz1;
/*--------------------------------------------------------------------
c fourth difference dissipation
--------------------------------------------------------------------*/
dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the first pde
--------------------------------------------------------------------*/
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 5.0e-01;
ce[0][7] = 2.0e-02;
ce[0][8] = 1.0e-02;
ce[0][9] = 3.0e-02;
ce[0][10] = 5.0e-01;
ce[0][11] = 4.0e-01;
ce[0][12] = 3.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the second pde
--------------------------------------------------------------------*/
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 1.0e-02;
ce[1][8] = 3.0e-02;
ce[1][9] = 2.0e-02;
ce[1][10] = 4.0e-01;
ce[1][11] = 3.0e-01;
ce[1][12] = 5.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the third pde
--------------------------------------------------------------------*/
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 4.0e-02;
ce[2][8] = 3.0e-02;
ce[2][9] = 5.0e-02;
ce[2][10] = 3.0e-01;
ce[2][11] = 5.0e-01;
ce[2][12] = 4.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the fourth pde
--------------------------------------------------------------------*/
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 3.0e-02;
ce[3][8] = 5.0e-02;
ce[3][9] = 4.0e-02;
ce[3][10] = 2.0e-01;
ce[3][11] = 1.0e-01;
ce[3][12] = 3.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 1.0e-01;
ce[4][5] = 4.0e-01;
ce[4][6] = 3.0e-01;
ce[4][7] = 5.0e-02;
ce[4][8] = 4.0e-02;
ce[4][9] = 3.0e-02;
ce[4][10] = 1.0e-01;
ce[4][11] = 3.0e-01;
ce[4][12] = 2.0e-01;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setiv(void) {
/*--------------------------------------------------------------------
c
c set the initial values of the dependent variables based on tri-linear
c interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double xi, eta, zeta;
double pxi, peta, pzeta;
double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],
ue_iny0k[5],ue_ij1[5],ue_ijnz[5];
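/*--------------------------------------------------------------------
c the interior is a transfinite (Boolean-sum) blend of the six face
c values: single-direction terms added, pairwise products subtracted,
c triple product added back, so u matches the data on every face
--------------------------------------------------------------------*/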
#pragma omp for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 1; k < nz - 1; k++) {
zeta = ((double)k) / (nz-1);
if (jglob != 0 && jglob != ny0-1) {
eta = ( (double) (jglob) ) / (ny0-1);
for (i = 0; i < nx; i++) {
iglob = i;
if(iglob != 0 && iglob != nx0-1) {
xi = ( (double) (iglob) ) / (nx0-1);
exact (0,jglob,k,ue_1jk);
exact (nx0-1,jglob,k,ue_nx0jk);
exact (iglob,0,k,ue_i1k);
exact (iglob,ny0-1,k,ue_iny0k);
exact (iglob,jglob,0,ue_ij1);
exact (iglob,jglob,nz-1,ue_ijnz);
for (m = 0; m < 5; m++) {
pxi = ( 1.0 - xi ) * ue_1jk[m]
+ xi * ue_nx0jk[m];
peta = ( 1.0 - eta ) * ue_i1k[m]
+ eta * ue_iny0k[m];
pzeta = ( 1.0 - zeta ) * ue_ij1[m]
+ zeta * ue_ijnz[m];
u[i][j][k][m] = pxi + peta + pzeta
- pxi * peta - peta * pzeta - pzeta * pxi
+ pxi * peta * pzeta;
}
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ssor(void) {
/*--------------------------------------------------------------------
c to perform pseudo-time stepping SSOR iterations
c for the five nonlinear PDEs.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int istep;
double tmp;
double delunm[5], tv[ISIZ1][ISIZ2][5];
/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;
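/*--------------------------------------------------------------------
c 1/(omega*(2-omega)) scales the SSOR correction rsd when u is
c updated at the end of each pseudo-time step
--------------------------------------------------------------------*/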
/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
#pragma omp parallel private(i,j,k,m)
{
#pragma omp for
for (i = 0; i < ISIZ1; i++) {
for (j = 0; j < ISIZ2; j++) {
for (k = 0; k < 5; k++) {
for (m = 0; m < 5; m++) {
a[i][j][k][m] = 0.0;
b[i][j][k][m] = 0.0;
c[i][j][k][m] = 0.0;
d[i][j][k][m] = 0.0;
}
}
}
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
}
timer_clear(1);
timer_start(1);
/*--------------------------------------------------------------------
c the timestep loop
--------------------------------------------------------------------*/
#pragma omp parallel private(istep,i,j,k,m)
{
for (istep = 1; istep <= itmax; istep++) {
if (istep%20 == 0 || istep == itmax || istep == 1) {
#pragma omp master
printf(" Time step %4d\n", istep);
}
/*--------------------------------------------------------------------
c perform SSOR iteration
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacld(k);
/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
blts(nx, ny, nz, k,
omega,
rsd,
a, b, c, d,
ist, iend, jst, jend,
nx0, ny0 );
}
#pragma omp barrier
for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacu(k);
/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
buts(nx, ny, nz, k,
omega,
rsd, tv,
d, a, b, c,
ist, iend, jst, jend,
nx0, ny0 );
}
#pragma omp barrier
/*--------------------------------------------------------------------
c update the variables
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz-2; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = u[i][j][k][m]
+ tmp * rsd[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration corrections
--------------------------------------------------------------------*/
if ( istep % inorm == 0 ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, delunm );
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
if ( ( istep % inorm == 0 ) ||
( istep == itmax ) ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
}
/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
if ( ( rsdnm[0] < tolrsd[0] ) &&
( rsdnm[1] < tolrsd[1] ) &&
( rsdnm[2] < tolrsd[2] ) &&
( rsdnm[3] < tolrsd[3] ) &&
( rsdnm[4] < tolrsd[4] ) ) {
exit(1);
}
}
} /* end parallel */
timer_stop(1);
maxtime= timer_read(1);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci,
char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine
--------------------------------------------------------------------*/
double xcrref[5],xceref[5],xciref,
xcrdif[5],xcedif[5],xcidif,
epsilon, dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
*cclass = 'U';
*verified = TRUE;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
xciref = 1.0;
if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
*cclass = 'S';
dtref = 5.0e-1;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xcrref[0] = 1.6196343210976702e-02;
xcrref[1] = 2.1976745164821318e-03;
xcrref[2] = 1.5179927653399185e-03;
xcrref[3] = 1.5029584435994323e-03;
xcrref[4] = 3.4264073155896461e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xceref[0] = 6.4223319957960924e-04;
xceref[1] = 8.4144342047347926e-05;
xceref[2] = 5.8588269616485186e-05;
xceref[3] = 5.8474222595157350e-05;
xceref[4] = 1.3103347914111294e-03;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xciref = 7.8418928865937083;
} else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
*cclass = 'W'; /* SPEC95fp size */
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (33x33x33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xcrref[0] = 0.1236511638192e+02;
xcrref[1] = 0.1317228477799e+01;
xcrref[2] = 0.2550120713095e+01;
xcrref[3] = 0.2326187750252e+01;
xcrref[4] = 0.2826799444189e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xceref[0] = 0.4867877144216;
xceref[1] = 0.5064652880982e-01;
xceref[2] = 0.9281818101960e-01;
xceref[3] = 0.8570126542733e-01;
xceref[4] = 0.1084277417792e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xciref = 0.1161399311023e+02;
} else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {
*cclass = 'A';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 7.7902107606689367e+02;
xcrref[1] = 6.3402765259692870e+01;
xcrref[2] = 1.9499249727292479e+02;
xcrref[3] = 1.7845301160418537e+02;
xcrref[4] = 1.8384760349464247e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 2.9964085685471943e+01;
xceref[1] = 2.8194576365003349;
xceref[2] = 7.3473412698774742;
xceref[3] = 6.7139225687777051;
xceref[4] = 7.0715315688392578e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 2.6030925604886277e+01;
} else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {
*cclass = 'B';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 3.5532672969982736e+03;
xcrref[1] = 2.6214750795310692e+02;
xcrref[2] = 8.8333721850952190e+02;
xcrref[3] = 7.7812774739425265e+02;
xcrref[4] = 7.3087969592545314e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (102X102X102)
c grid, after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 1.1401176380212709e+02;
xceref[1] = 8.1098963655421574;
xceref[2] = 2.8480597317698308e+01;
xceref[3] = 2.5905394567832939e+01;
xceref[4] = 2.6054907504857413e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 4.7887162703308227e+01;
} else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {
*cclass = 'C';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 1.03766980323537846e+04;
xcrref[1] = 8.92212458801008552e+02;
xcrref[2] = 2.56238814582660871e+03;
xcrref[3] = 2.19194343857831427e+03;
xcrref[4] = 1.78078057261061185e+04;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (162X162X162)
c grid, after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 2.15986399716949279e+02;
xceref[1] = 1.55789559239863600e+01;
xceref[2] = 5.41318863077207766e+01;
xceref[3] = 4.82262643154045421e+01;
xceref[4] = 4.55902910043250358e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 6.66404553572181300e+01;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12,
c 33X33X33, 64X64X64, 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the relative differences of solution values from the known reference values.
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
xcidif = fabs((xci - xciref)/xciref);
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
if (*cclass != 'U') {
printf("\n Verification being performed for cclass %1c\n", *cclass);
printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown cclass\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d %20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d %20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of surface integral\n");
} else {
printf(" Surface integral\n");
}
if (*cclass == 'U') {
printf(" %20.13e\n", xci);
} else if (xcidif > epsilon) {
*verified = FALSE;
printf(" FAILURE: %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
} else {
printf(" %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
void c_print_results( char *name,
char cclass,
int n1,
int n2,
int n3,
int niter,
int nthreads,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags,
char *rand)
{
char *evalue="1000";
printf( "\n\n %s Benchmark Completed\n", name );
printf( " Class = %c\n", cclass );
if( n2 == 0 && n3 == 0 )
printf( " Size = %12d\n", n1 ); /* as in IS */
else
printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
printf( " Iterations = %12d\n", niter );
printf( " Threads = %12d\n", nthreads );
printf( " Time in seconds = %12.2f\n", t );
printf( " Mop/s total = %12.2f\n", mops );
printf( " Operation type = %24s\n", optype);
if( passed_verification )
printf( " Verification = SUCCESSFUL\n" );
else
printf( " Verification = UNSUCCESSFUL\n" );
printf( " Version = %12s\n", npbversion );
printf( " Compile date = %12s\n", compiletime );
printf( "\n Compile options:\n" );
printf( " CC = %s\n", cc );
printf( " CLINK = %s\n", clink );
printf( " C_LIB = %s\n", c_lib );
printf( " C_INC = %s\n", c_inc );
printf( " CFLAGS = %s\n", cflags );
printf( " CLINKFLAGS = %s\n", clinkflags );
printf( " RAND = %s\n", rand );
#ifdef SMP
evalue = getenv("MP_SET_NUMTHREADS");
printf( " MULTICPUS = %s\n", evalue );
#endif
/* printf( "\n\n" );
printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: [email protected]\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 415-604-3957\n\n" );*/
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time( void )
{
double t;
wtime( &t );
return( t );
}
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop( int n )
{
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read( int n )
{
return( elapsed[n] );
}
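/* Wall-clock seconds. The whole seconds of the first call are latched
in the static 'sec' and subtracted, so the returned double keeps
microsecond resolution over long runs. */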
void wtime(double *t)
{
static int sec = -1;
struct timeval tv;
// gettimeofday(&tv, (void *)0);
gettimeofday(&tv, (struct timezone *)0);
if (sec < 0) sec = tv.tv_sec;
*t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec;
}
// common/c_randdp.c
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif
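/* r23 = 2^-23, r46 = 2^-46, t23 = 2^23, t46 = 2^46 */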
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
double randlc (double *x, double a) {
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*c---------------------------------------------------------------------
c
c This routine returns a uniform pseudorandom double precision number in the
c range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The returned value RANDLC is normalized to be
c between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
c the new seed x_1, so that subsequent calls to RANDLC using the same
c arguments will generate a continuous sequence.
c
c This routine should produce the same results on any computer with at least
c 48 mantissa bits in double precision floating point data. On 64 bit
c systems, double precision should be disabled.
c
c David H. Bailey October 26, 1990
c
c---------------------------------------------------------------------*/
double t1,t2,t3,t4,a1,a2,x1,x2,z;
/*c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 * A1 + A2.
c---------------------------------------------------------------------*/
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
/*c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 * X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------*/
t1 = r23 * (*x);
x1 = (int)t1;
x2 = (*x) - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
(*x) = t3 - t46 * t4;
return (r46 * (*x));
}
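/*c---------------------------------------------------------------------
c Illustrative use (a sketch, not part of the benchmark; the constants
c are assumed example values -- any odd integers in (1, 2^46) work):
c
c double seed = 314159265.0;
c double a = 1220703125.0;
c double r = randlc(&seed, a); r lies in (0, 1); seed now holds x_1
c---------------------------------------------------------------------*/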
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
void vranlc (int n, double *x_seed, double a, double* y) {
/* void vranlc (int n, double *x_seed, double a, double y[]) { */
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*c---------------------------------------------------------------------
c
c This routine generates N uniform pseudorandom double precision numbers in
c the range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The N results are placed in Y and are normalized
c to be between 0 and 1. X is updated to contain the new seed, so that
c subsequent calls to VRANLC using the same arguments will generate a
c continuous sequence. If N is zero, only initialization is performed, and
c the variables X, A and Y are ignored.
c
c This routine is the standard version designed for scalar or RISC systems.
c However, it should produce the same results on any single processor
c computer with at least 48 mantissa bits in double precision floating point
c data. On 64 bit systems, double precision should be disabled.
c
c---------------------------------------------------------------------*/
int i;
double x,t1,t2,t3,t4,a1,a2,x1,x2,z;
/*c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 * A1 + A2.
c---------------------------------------------------------------------*/
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
x = *x_seed;
/*c---------------------------------------------------------------------
c Generate N results. This loop is not vectorizable.
c---------------------------------------------------------------------*/
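/*c---------------------------------------------------------------------
c NOTE: results go to y[1]..y[n]; y[0] is never written (1-based
c indexing carried over from the Fortran original).
c---------------------------------------------------------------------*/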
for (i = 1; i <= n; i++) {
/*c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 * X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------*/
t1 = r23 * x;
x1 = (int)t1;
x2 = x - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
x = t3 - t46 * t4;
y[i] = r46 * x;
}
*x_seed = x;
}
|
intruder.c
|
/* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
enum param_types {
PARAM_ATTACK = (unsigned char)'a',
PARAM_LENGTH = (unsigned char)'l',
PARAM_NUM = (unsigned char)'n',
PARAM_SEED = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
enum param_defaults {
PARAM_DEFAULT_ATTACK = 10,
PARAM_DEFAULT_LENGTH = 16,
PARAM_DEFAULT_NUM = 1 << 20,
PARAM_DEFAULT_SEED = 1,
PARAM_DEFAULT_THREAD = 1,
};
long global_params[256];
#if 0
= { /* 256 = ascii limit */
[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
[PARAM_NUM] = PARAM_DEFAULT_NUM,
[PARAM_SEED] = PARAM_DEFAULT_SEED,
[PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
#endif
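/* Runtime equivalent of the designated-initializer table disabled above. */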
void global_param_init()
{
global_params[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK;
global_params[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH;
global_params[PARAM_NUM] = PARAM_DEFAULT_NUM;
global_params[PARAM_SEED] = PARAM_DEFAULT_SEED;
global_params[PARAM_THREAD] = PARAM_DEFAULT_THREAD;
}
typedef struct arg {
/* input: */
stream_t* streamPtr;
decoder_t* decoderPtr;
/* output: */
vector_t** errorVectors;
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" a <UINT> Percent [a]ttack (%i)\n", PARAM_DEFAULT_ATTACK);
printf(" l <UINT> Max data [l]ength (%i)\n", PARAM_DEFAULT_LENGTH);
printf(" n <UINT> [n]umber of flows (%i)\n", PARAM_DEFAULT_NUM);
printf(" s <UINT> Random [s]eed (%i)\n", PARAM_DEFAULT_SEED);
printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD);
exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
long i;
long opt;
opterr = 0;
while ((opt = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
switch (opt) {
case 'a':
case 'l':
case 'n':
case 's':
case 't':
global_params[(unsigned char)opt] = atol(optarg);
break;
case '?':
default:
opterr++;
break;
}
}
for (i = optind; i < argc; i++) {
fprintf(stderr, "Non-option argument: %s\n", argv[i]);
opterr++;
}
if (opterr) {
displayUsage(argv[0]);
}
}
/* =============================================================================
* processPackets
* =============================================================================
*/
void
processPackets (void* argPtr)
{
TM_THREAD_ENTER();
long threadId = thread_getId();
stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
detector_t* detectorPtr = PDETECTOR_ALLOC();
assert(detectorPtr);
PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
vector_t* errorVectorPtr = errorVectors[threadId];
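/*
* Each iteration pulls one packet from the shared stream, feeds it to
* the decoder (fragments of a flow may arrive out of order), and scans
* any flow that has just been fully reassembled; ids of suspicious
* flows are recorded in this thread's private error vector.
*/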
while (1) {
char* bytes;
TM_BEGIN();
bytes = TMSTREAM_GETPACKET(streamPtr);
TM_END();
if (!bytes) {
break;
}
packet_t* packetPtr = (packet_t*)bytes;
long flowId = packetPtr->flowId;
int_error_t error;
TM_BEGIN();
error = TMDECODER_PROCESS(decoderPtr,
bytes,
(PACKET_HEADER_LENGTH + packetPtr->length));
TM_END();
if (error) {
/*
* Currently, stream_generate() does not create these errors.
*/
assert(0);
bool_t status = TM_PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
assert(status);
}
char* data;
long decodedFlowId;
TM_BEGIN();
data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
TM_END();
if (data) {
int_error_t error = PDETECTOR_PROCESS(detectorPtr, data);
//P_FREE(data);
if (error) {
bool_t status = TM_PVECTOR_PUSHBACK(errorVectorPtr,
(void*)decodedFlowId);
assert(status);
}
}
}
PDETECTOR_FREE(detectorPtr);
TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
MAIN(argc, argv)
{
/*
* Initialization
*/
global_param_init();
parseArgs(argc, (char** const)argv);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
long percentAttack = global_params[PARAM_ATTACK];
long maxDataLength = global_params[PARAM_LENGTH];
long numFlow = global_params[PARAM_NUM];
long randomSeed = global_params[PARAM_SEED];
printf("Percent attack = %li\n", percentAttack);
printf("Max data length = %li\n", maxDataLength);
printf("Num flow = %li\n", numFlow);
printf("Random seed = %li\n", randomSeed);
dictionary_t* dictionaryPtr = dictionary_alloc();
assert(dictionaryPtr);
stream_t* streamPtr = stream_alloc(percentAttack);
assert(streamPtr);
long numAttack = stream_generate(streamPtr,
dictionaryPtr,
numFlow,
randomSeed,
maxDataLength);
printf("Num attack = %li\n", numAttack);
decoder_t* decoderPtr = decoder_alloc();
assert(decoderPtr);
vector_t** errorVectors = (vector_t**)SEQ_MALLOC(numThread * sizeof(vector_t*));
assert(errorVectors);
long i;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = vector_alloc(numFlow);
assert(errorVectorPtr);
errorVectors[i] = errorVectorPtr;
}
arg_t arg;
arg.streamPtr = streamPtr;
arg.decoderPtr = decoderPtr;
arg.errorVectors = errorVectors;
/*
* Run transactions
*/
// NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
// wallclock time, we want to be sure we read time inside the
// simulator, or else we report native cycles spent on the benchmark
// instead of simulator cycles.
GOTO_SIM();
TIMER_T startTime;
TIMER_READ(startTime);
#ifdef OTM
#pragma omp parallel
{
processPackets((void*)&arg);
}
#else
thread_start(processPackets, (void*)&arg);
#endif
TIMER_T stopTime;
TIMER_READ(stopTime);
// NB: As above, timer reads must be done inside of the simulated region
// for PTLSim/ASF
GOTO_REAL();
printf("Elapsed time = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution
*/
long numFound = 0;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = errorVectors[i];
long e;
long numError = vector_getSize(errorVectorPtr);
numFound += numError;
for (e = 0; e < numError; e++) {
long flowId = (long)vector_at(errorVectorPtr, e);
bool_t status = stream_isAttack(streamPtr, flowId);
assert(status);
}
}
printf("Num found = %li\n", numFound);
assert(numFound == numAttack);
/*
* Clean up
*/
for (i = 0; i < numThread; i++) {
vector_free(errorVectors[i]);
}
SEQ_FREE(errorVectors);
decoder_free(decoderPtr);
stream_free(streamPtr);
dictionary_free(dictionaryPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
generator_spgemm_csc_asparse.c
|
/******************************************************************************
** Copyright (c) 2015-2019, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_scalar( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_load_sd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_load_sd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_load_ss(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_load_ss(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ss(c%u_%u, _mm_mul_ss(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_ss(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
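/*
 * For reference, a single F64 call of the scalar inner loop above appends
 * code of the following shape (here with placeholder values i_k == 3,
 * i_z == 0, ldc == 8, a row index of 5, and an A offset of 17):
 *
 *   __m128d c3_0 = _mm_load_sd(&C[(l_n*8)+5]);
 *   __m128d a3_0 = _mm_load_sd(&A[17]);
 *   #if defined(__SSE3__) && defined(__AVX__)
 *   c3_0 = _mm_add_sd(c3_0, _mm_mul_sd(a3_0, _mm256_castpd256_pd128(b3)));
 *   #endif
 *   #if defined(__SSE3__) && !defined(__AVX__)
 *   c3_0 = _mm_add_sd(c3_0, _mm_mul_sd(a3_0, b3));
 *   #endif
 *   _mm_store_sd(&C[(l_n*8)+5], c3_0);
 */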
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_two_vector( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&C[(l_n*%u)+%u]));\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&A[%u]));\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd((double*)&C[(l_n*%u)+%u], _mm_castps_pd(c%u_%u));\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_four_vector( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
unsigned int l_i;
unsigned int l_z = i_z;
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d c%u_%u = _mm256_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d a%u_%u = _mm256_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm256_add_pd(c%u_%u, _mm256_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm256_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
for ( l_i = 0; l_i < 2; l_i++ ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, l_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, l_z, i_column_idx[i_k] + l_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, l_z, i_k, l_z, i_k, l_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z], i_k, l_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_z += 2;
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_loadu_ps(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_loadu_ps(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_ps(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_spgemm_csc_asparse( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx,
const double* i_values ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
unsigned int l_k;
unsigned int l_flop_count = 0;
LIBXSMM_UNUSED(i_arch);
LIBXSMM_UNUSED(i_values);
/* loop over the columns of C in the generated code; we fully unroll inside each column */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n #pragma nounroll_and_jam\n for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* reset the current column in C if needed */
if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
if ( i_xgemm_desc->m > 1 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0f;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
}
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
assert(0 != i_column_idx);
/* loop over columns in A, rows in B and fully unroll */
for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++ ) {
unsigned int l_column_elements = i_column_idx[l_k + 1] - i_column_idx[l_k];
unsigned int l_z = 0;
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) || defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
if ( l_column_elements > 0 ) {
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m256d b%u = _mm256_broadcast_sd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128d b%u = _mm_loaddup_pd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m128 b%u = _mm_broadcast_ss(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128 b%u = _mm_load_ss(&B[(l_n*%u)+%u]); b%u = _mm_shuffle_ps(b%u, b%u, 0x00);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k, l_k, l_k, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
/* loop over the entries of the current column of A and look for vectorization potential */
for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
assert(0 != i_row_idx);
/* 4 element vector might be possible */
if ( (l_z < (l_column_elements - 3)) && (l_column_elements > 3) ) {
/* check for 256bit vector instruction */
if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
(i_row_idx[i_column_idx[l_k] + l_z] + 2 == i_row_idx[i_column_idx[l_k] + l_z + 2]) &&
(i_row_idx[i_column_idx[l_k] + l_z] + 3 == i_row_idx[i_column_idx[l_k] + l_z + 3]) &&
(i_row_idx[i_column_idx[l_k] + l_z + 3] < (unsigned int)i_xgemm_desc->m)) {
libxsmm_sparse_csc_asparse_innerloop_four_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
l_z += 3;
/* check for 128bit vector instruction */
} else if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
(i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
l_z++;
/* scalar instruction */
} else {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
}
}
/* 2 element vector might be possible */
} else if ( (l_z < (l_column_elements - 1)) && (l_column_elements > 1)) {
/* check for 128bit vector instruction */
if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
(i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
l_z++;
/* scalar instruction */
} else {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
}
}
/* scalar anyways */
} else {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
}
}
}
/* C fallback code */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#else\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* loop over the entries of the current column of A */
for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[(l_n*%u)+%u] += A[%u] * B[(l_n*%u)+%u];\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_k] + l_z], i_column_idx[l_k] + l_z, (unsigned int)i_xgemm_desc->ldb, l_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_flop_count += 2;
}
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* add flop counter */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * i_xgemm_desc->n);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
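/*
 * Taken together, the routines above emit a fully unrolled kernel of the
 * following shape (loop bound and leading dimensions are placeholders):
 *
 *   unsigned int l_n = 0;
 *   #pragma nounroll_and_jam
 *   for ( l_n = 0; l_n < 9; l_n++) {
 *     ...optional beta=0 reset of column l_n of C...
 *     ...per-k broadcast of B[(l_n*ldb)+k] into b<k>...
 *     ...4-wide, 2-wide, or scalar FMA groups picked from the CSC row pattern...
 *   }
 *
 * and, in non-NDEBUG builds, an atomic increment of libxsmm_num_total_flops
 * by 2 flops per in-bounds nonzero of A times the number of columns n.
 */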
|
3d7pt.c
|
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
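/*
 * Typical use, mirroring the timing loop in main() below:
 *
 *   struct timeval start, end, result;
 *   gettimeofday(&start, 0);
 *   ...timed region...
 *   gettimeofday(&end, 0);
 *   (void) timeval_subtract(&result, &end, &start);
 *   double seconds = result.tv_sec + result.tv_usec * 1.0e-6;
 */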
int main(int argc, char *argv[])
{
int t, i, j, k, test;
/* default sizes (illustrative fallbacks) guard against use of uninitialized
 * values when fewer than four command-line arguments are given */
int Nx = 32+2, Ny = 32+2, Nz = 32+2, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile-size list; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0; /* initialize the second buffer so boundary reads are defined */
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
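/* Illustrative throughput estimate (not part of the original benchmark):
 * with 6 additions and 2 multiplications per point, as noted above, the best
 * run performs roughly 8*(Nx-2)*(Ny-2)*(Nz-2)*(Nt-1) flops. */
double est_gflops = 8.0 * (double)(Nx-2) * (double)(Ny-2) * (double)(Nz-2)
* (double)(Nt-1) / (min_tdiff * 1.0e9);
printf("Approx. GFLOP/s (best run) = %f\n", est_gflops);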
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
indirectaccess4-orig-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The two pointers are 12 elements apart (xa2 - xa1 = 12).
They are used as base addresses for indirect array accesses through an index
set (another array). The index set contains two indices that are also 12 apart:
indexSet[1] - indexSet[0] = 533 - 521 = 12.
So xa1[idx] and xa2[idx] carry a loop-carried dependence between iterations
i = 0 and i = 1.
We use the default loop scheduling (static, even chunks) in OpenMP. Two
dependent iterations may then be scheduled within the same chunk to the same
thread, in which case no data race manifests at runtime.
N is 180, and iterations i = 0 and i = 1 have the loop-carried dependence.
Under static even scheduling, at least 180 threads (180/180 = 1 iteration per
thread) are required to guarantee that iterations 0 and 1 run on two
different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
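/*
Concretely: iteration i = 0 writes xa2[521], i.e. base[521 + 12] = base[533],
while iteration i = 1 writes xa1[533] = base[533], so the two iterations touch
the same element through different pointers. Forcing them onto different
threads, for example with schedule(static,1) on the parallel loop below, makes
the race observable at runtime.
*/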
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
int indexSet[N] = {
521, 533, 525, 527, 529, 531, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
int main (int argc, char* argv[])
{
double * base = (double*) malloc(sizeof(double)* (2013+12+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
double * xa1 = base;
double * xa2 = xa1 + 12;
int i;
// initialize segments touched by indexSet
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
#pragma omp parallel for // default static even scheduling may not trigger data race!
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0;
xa2[idx]+= 3.0;
}
printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
convolution_7x7_pack1ton_fp16s.h
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv7x7s2_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
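// packn = vlenb/2 is the number of fp16 lanes in one vector register, and vl
// pins the vector length to exactly packn elements. With stride 2, each output
// row consumes 2*outw input elements, so tailstep above (w - 2*outw + w)
// advances each input row pointer from the end of the consumed span to the
// start of the input row two rows below.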
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
const __fp16* r4 = img0.row<const __fp16>(4);
const __fp16* r5 = img0.row<const __fp16>(5);
const __fp16* r6 = img0.row<const __fp16>(6);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
vfloat16m1_t _sum4 = vle16_v_f16m1(outptr0 + packn * 4, vl);
vfloat16m1_t _sum5 = vle16_v_f16m1(outptr0 + packn * 5, vl);
vfloat16m1_t _sum6 = vle16_v_f16m1(outptr0 + packn * 6, vl);
vfloat16m1_t _sum7 = vle16_v_f16m1(outptr0 + packn * 7, vl);
vfloat16m1_t _k00 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k01 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k02 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k03 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k04 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k05 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k06 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k00, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[6], _k00, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[8], _k00, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[10], _k00, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[12], _k00, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[14], _k00, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[5], _k01, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[7], _k01, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[9], _k01, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[11], _k01, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[13], _k01, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[15], _k01, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[6], _k02, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[8], _k02, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[10], _k02, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[12], _k02, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[14], _k02, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[16], _k02, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[3], _k03, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[5], _k03, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[7], _k03, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[9], _k03, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[11], _k03, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[13], _k03, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[15], _k03, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[17], _k03, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[4], _k04, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[6], _k04, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[8], _k04, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[10], _k04, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[12], _k04, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[14], _k04, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[16], _k04, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[18], _k04, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[5], _k05, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[7], _k05, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[9], _k05, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[11], _k05, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[13], _k05, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[15], _k05, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[17], _k05, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[19], _k05, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[6], _k06, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[8], _k06, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[10], _k06, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[12], _k06, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r0[14], _k06, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r0[16], _k06, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r0[18], _k06, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r0[20], _k06, vl);
vfloat16m1_t _k10 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k11 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k12 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k13 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k14 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k15 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k16 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k10, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[6], _k10, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[8], _k10, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[10], _k10, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[12], _k10, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[14], _k10, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[5], _k11, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[7], _k11, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[9], _k11, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[11], _k11, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[13], _k11, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[15], _k11, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[6], _k12, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[8], _k12, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[10], _k12, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[12], _k12, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[14], _k12, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[16], _k12, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[3], _k13, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[5], _k13, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[7], _k13, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[9], _k13, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[11], _k13, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[13], _k13, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[15], _k13, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[17], _k13, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[4], _k14, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[6], _k14, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[8], _k14, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[10], _k14, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[12], _k14, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[14], _k14, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[16], _k14, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[18], _k14, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[5], _k15, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[7], _k15, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[9], _k15, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[11], _k15, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[13], _k15, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[15], _k15, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[17], _k15, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[19], _k15, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[6], _k16, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[8], _k16, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[10], _k16, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[12], _k16, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r1[14], _k16, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r1[16], _k16, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r1[18], _k16, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r1[20], _k16, vl);
vfloat16m1_t _k20 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k21 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k22 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k23 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k24 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k25 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k26 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k20, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[6], _k20, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[8], _k20, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[10], _k20, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[12], _k20, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[14], _k20, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[5], _k21, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[7], _k21, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[9], _k21, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[11], _k21, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[13], _k21, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[15], _k21, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[6], _k22, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[8], _k22, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[10], _k22, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[12], _k22, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[14], _k22, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[16], _k22, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[3], _k23, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[5], _k23, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[7], _k23, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[9], _k23, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[11], _k23, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[13], _k23, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[15], _k23, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[17], _k23, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[4], _k24, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[6], _k24, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[8], _k24, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[10], _k24, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[12], _k24, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[14], _k24, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[16], _k24, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[18], _k24, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[5], _k25, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[7], _k25, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[9], _k25, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[11], _k25, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[13], _k25, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[15], _k25, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[17], _k25, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[19], _k25, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[6], _k26, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[8], _k26, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[10], _k26, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[12], _k26, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r2[14], _k26, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r2[16], _k26, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r2[18], _k26, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r2[20], _k26, vl);
vfloat16m1_t _k30 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k31 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k32 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k33 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k34 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k35 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k36 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r3[0], _k30, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[2], _k30, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[4], _k30, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[6], _k30, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[8], _k30, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[10], _k30, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[12], _k30, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[14], _k30, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[1], _k31, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[3], _k31, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[5], _k31, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[7], _k31, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[9], _k31, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[11], _k31, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[13], _k31, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[15], _k31, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[2], _k32, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[4], _k32, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[6], _k32, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[8], _k32, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[10], _k32, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[12], _k32, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[14], _k32, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[16], _k32, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[3], _k33, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[5], _k33, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[7], _k33, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[9], _k33, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[11], _k33, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[13], _k33, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[15], _k33, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[17], _k33, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[4], _k34, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[6], _k34, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[8], _k34, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[10], _k34, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[12], _k34, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[14], _k34, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[16], _k34, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[18], _k34, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[5], _k35, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[7], _k35, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[9], _k35, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[11], _k35, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[13], _k35, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[15], _k35, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[17], _k35, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[19], _k35, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[6], _k36, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[8], _k36, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[10], _k36, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[12], _k36, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r3[14], _k36, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r3[16], _k36, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r3[18], _k36, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r3[20], _k36, vl);
vfloat16m1_t _k40 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k41 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k42 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k43 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k44 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k45 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k46 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r4[0], _k40, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[2], _k40, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[4], _k40, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[6], _k40, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[8], _k40, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[10], _k40, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[12], _k40, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[14], _k40, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[1], _k41, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[3], _k41, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[5], _k41, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[7], _k41, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[9], _k41, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[11], _k41, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[13], _k41, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[15], _k41, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[2], _k42, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[4], _k42, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[6], _k42, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[8], _k42, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[10], _k42, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[12], _k42, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[14], _k42, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[16], _k42, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[3], _k43, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[5], _k43, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[7], _k43, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[9], _k43, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[11], _k43, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[13], _k43, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[15], _k43, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[17], _k43, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[4], _k44, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[6], _k44, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[8], _k44, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[10], _k44, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[12], _k44, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[14], _k44, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[16], _k44, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[18], _k44, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[5], _k45, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[7], _k45, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[9], _k45, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[11], _k45, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[13], _k45, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[15], _k45, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[17], _k45, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[19], _k45, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[6], _k46, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[8], _k46, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[10], _k46, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[12], _k46, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r4[14], _k46, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r4[16], _k46, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r4[18], _k46, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r4[20], _k46, vl);
vfloat16m1_t _k50 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k51 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k52 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k53 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k54 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k55 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k56 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r5[0], _k50, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[2], _k50, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[4], _k50, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[6], _k50, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[8], _k50, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[10], _k50, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[12], _k50, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[14], _k50, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[1], _k51, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[3], _k51, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[5], _k51, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[7], _k51, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[9], _k51, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[11], _k51, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[13], _k51, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[15], _k51, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[2], _k52, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[4], _k52, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[6], _k52, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[8], _k52, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[10], _k52, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[12], _k52, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[14], _k52, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[16], _k52, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[3], _k53, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[5], _k53, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[7], _k53, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[9], _k53, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[11], _k53, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[13], _k53, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[15], _k53, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[17], _k53, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[4], _k54, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[6], _k54, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[8], _k54, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[10], _k54, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[12], _k54, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[14], _k54, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[16], _k54, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[18], _k54, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[5], _k55, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[7], _k55, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[9], _k55, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[11], _k55, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[13], _k55, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[15], _k55, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[17], _k55, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[19], _k55, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[6], _k56, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[8], _k56, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[10], _k56, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[12], _k56, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r5[14], _k56, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r5[16], _k56, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r5[18], _k56, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r5[20], _k56, vl);
vfloat16m1_t _k60 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k61 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k62 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k63 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k64 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k65 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k66 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr -= packn * 42;
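// by this point kptr has advanced 6*7 = 42 packn-sized blocks (rows 0-5);
// rewinding restores it to the start of this input channel's 7x7 kernel so
// the next j iteration re-reads the same weights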
_sum0 = vfmacc_vf_f16m1(_sum0, r6[0], _k60, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[2], _k60, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[4], _k60, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[6], _k60, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[8], _k60, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[10], _k60, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[12], _k60, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[14], _k60, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[1], _k61, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[3], _k61, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[5], _k61, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[7], _k61, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[9], _k61, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[11], _k61, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[13], _k61, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[15], _k61, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[2], _k62, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[4], _k62, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[6], _k62, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[8], _k62, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[10], _k62, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[12], _k62, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[14], _k62, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[16], _k62, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[3], _k63, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[5], _k63, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[7], _k63, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[9], _k63, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[11], _k63, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[13], _k63, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[15], _k63, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[17], _k63, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[4], _k64, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[6], _k64, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[8], _k64, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[10], _k64, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[12], _k64, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[14], _k64, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[16], _k64, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[18], _k64, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[5], _k65, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[7], _k65, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[9], _k65, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[11], _k65, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[13], _k65, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[15], _k65, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[17], _k65, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[19], _k65, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[6], _k66, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[8], _k66, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[10], _k66, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[12], _k66, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, r6[14], _k66, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, r6[16], _k66, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, r6[18], _k66, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, r6[20], _k66, vl);
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr0 + packn, _sum1, vl);
vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);
outptr0 += packn * 8;
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
r5 += 16;
r6 += 16;
}
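// Access-pattern note (sketch, inferred from the loops above and below):
// this kernel applies a 7x7 convolution with stride 2 to packn-packed fp16
// data, i.e. out[j] accumulates k[ky][kx] * in[row + ky][2*j + kx] for
// ky, kx in 0..6. The loop above emits 8 output columns per iteration, so
// the input row pointers r0..r6 advance by 16 scalars (8 outputs * stride 2),
// while kptr advances packn*7 per kernel row and rewinds packn*42 after
// row 6 (6 rows * packn*7), restoring it for the next output tile.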
for (; j + 3 < outw; j += 4)
{
vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
vfloat16m1_t _k00 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k01 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k02 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k03 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k04 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k05 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k06 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k00, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[6], _k00, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[5], _k01, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[7], _k01, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[6], _k02, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[8], _k02, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[3], _k03, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[5], _k03, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[7], _k03, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[9], _k03, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[4], _k04, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[6], _k04, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[8], _k04, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[10], _k04, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[5], _k05, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[7], _k05, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[9], _k05, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[11], _k05, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[6], _k06, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r0[8], _k06, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r0[10], _k06, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r0[12], _k06, vl);
vfloat16m1_t _k10 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k11 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k12 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k13 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k14 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k15 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k16 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k10, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[6], _k10, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[5], _k11, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[7], _k11, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[6], _k12, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[8], _k12, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[3], _k13, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[5], _k13, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[7], _k13, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[9], _k13, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[4], _k14, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[6], _k14, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[8], _k14, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[10], _k14, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[5], _k15, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[7], _k15, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[9], _k15, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[11], _k15, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[6], _k16, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r1[8], _k16, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r1[10], _k16, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r1[12], _k16, vl);
vfloat16m1_t _k20 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k21 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k22 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k23 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k24 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k25 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k26 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k20, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[6], _k20, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[5], _k21, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[7], _k21, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[6], _k22, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[8], _k22, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[3], _k23, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[5], _k23, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[7], _k23, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[9], _k23, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[4], _k24, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[6], _k24, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[8], _k24, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[10], _k24, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[5], _k25, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[7], _k25, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[9], _k25, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[11], _k25, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[6], _k26, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r2[8], _k26, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r2[10], _k26, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r2[12], _k26, vl);
vfloat16m1_t _k30 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k31 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k32 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k33 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k34 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k35 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k36 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r3[0], _k30, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[2], _k30, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[4], _k30, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[6], _k30, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[1], _k31, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[3], _k31, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[5], _k31, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[7], _k31, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[2], _k32, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[4], _k32, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[6], _k32, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[8], _k32, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[3], _k33, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[5], _k33, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[7], _k33, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[9], _k33, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[4], _k34, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[6], _k34, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[8], _k34, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[10], _k34, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[5], _k35, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[7], _k35, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[9], _k35, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[11], _k35, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[6], _k36, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r3[8], _k36, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r3[10], _k36, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r3[12], _k36, vl);
vfloat16m1_t _k40 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k41 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k42 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k43 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k44 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k45 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k46 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r4[0], _k40, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[2], _k40, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[4], _k40, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[6], _k40, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[1], _k41, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[3], _k41, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[5], _k41, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[7], _k41, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[2], _k42, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[4], _k42, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[6], _k42, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[8], _k42, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[3], _k43, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[5], _k43, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[7], _k43, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[9], _k43, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[4], _k44, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[6], _k44, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[8], _k44, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[10], _k44, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[5], _k45, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[7], _k45, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[9], _k45, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[11], _k45, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[6], _k46, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r4[8], _k46, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r4[10], _k46, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r4[12], _k46, vl);
vfloat16m1_t _k50 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k51 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k52 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k53 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k54 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k55 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k56 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r5[0], _k50, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[2], _k50, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[4], _k50, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[6], _k50, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[1], _k51, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[3], _k51, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[5], _k51, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[7], _k51, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[2], _k52, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[4], _k52, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[6], _k52, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[8], _k52, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[3], _k53, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[5], _k53, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[7], _k53, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[9], _k53, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[4], _k54, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[6], _k54, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[8], _k54, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[10], _k54, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[5], _k55, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[7], _k55, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[9], _k55, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[11], _k55, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[6], _k56, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r5[8], _k56, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r5[10], _k56, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r5[12], _k56, vl);
vfloat16m1_t _k60 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k61 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k62 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k63 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k64 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k65 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k66 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr -= packn * 42;
_sum0 = vfmacc_vf_f16m1(_sum0, r6[0], _k60, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[2], _k60, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[4], _k60, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[6], _k60, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[1], _k61, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[3], _k61, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[5], _k61, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[7], _k61, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[2], _k62, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[4], _k62, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[6], _k62, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[8], _k62, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[3], _k63, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[5], _k63, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[7], _k63, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[9], _k63, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[4], _k64, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[6], _k64, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[8], _k64, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[10], _k64, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[5], _k65, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[7], _k65, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[9], _k65, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[11], _k65, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[6], _k66, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, r6[8], _k66, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, r6[10], _k66, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, r6[12], _k66, vl);
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr0 + packn, _sum1, vl);
vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
outptr0 += packn * 4;
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
}
for (; j < outw; j++)
{
vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
vfloat16m1_t _k00 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k01 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k02 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k03 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k04 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k05 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k06 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[3], _k03, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[4], _k04, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[5], _k05, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r0[6], _k06, vl);
vfloat16m1_t _k10 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k11 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k12 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k13 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k14 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k15 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k16 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[3], _k13, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[4], _k14, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[5], _k15, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r1[6], _k16, vl);
vfloat16m1_t _k20 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k21 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k22 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k23 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k24 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k25 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k26 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[3], _k23, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[4], _k24, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[5], _k25, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r2[6], _k26, vl);
vfloat16m1_t _k30 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k31 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k32 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k33 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k34 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k35 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k36 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r3[0], _k30, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[1], _k31, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[2], _k32, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[3], _k33, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[4], _k34, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[5], _k35, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r3[6], _k36, vl);
vfloat16m1_t _k40 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k41 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k42 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k43 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k44 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k45 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k46 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r4[0], _k40, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[1], _k41, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[2], _k42, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[3], _k43, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[4], _k44, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[5], _k45, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r4[6], _k46, vl);
vfloat16m1_t _k50 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k51 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k52 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k53 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k54 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k55 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k56 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f16m1(_sum0, r5[0], _k50, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[1], _k51, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[2], _k52, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[3], _k53, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[4], _k54, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[5], _k55, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r5[6], _k56, vl);
vfloat16m1_t _k60 = vle16_v_f16m1(kptr, vl);
vfloat16m1_t _k61 = vle16_v_f16m1(kptr + packn, vl);
vfloat16m1_t _k62 = vle16_v_f16m1(kptr + packn * 2, vl);
vfloat16m1_t _k63 = vle16_v_f16m1(kptr + packn * 3, vl);
vfloat16m1_t _k64 = vle16_v_f16m1(kptr + packn * 4, vl);
vfloat16m1_t _k65 = vle16_v_f16m1(kptr + packn * 5, vl);
vfloat16m1_t _k66 = vle16_v_f16m1(kptr + packn * 6, vl);
kptr -= packn * 42;
_sum0 = vfmacc_vf_f16m1(_sum0, r6[0], _k60, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[1], _k61, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[2], _k62, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[3], _k63, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[4], _k64, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[5], _k65, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, r6[6], _k66, vl);
vse16_v_f16m1(outptr0, _sum0, vl);
outptr0 += packn;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
pagerank_cilk.c
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <memory.h>
#include <sys/time.h>
#include <cilk/cilk.h>
#include <cilk/reducer_opadd.h>
#ifndef REAL
#define REAL float
#endif
#define ALPHA 0.85
#define EPSILON 0.01
#define ARRAY_LENGTH 600000000
#define MAX_TIMES 1000
#define MICRO_IN_SEC 1000000.00
double begin_time, end_time, serial_time=0, parallel_time=0;
double microtime(){
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}
typedef struct{
int nodei;
int nodej;
REAL p;
} EDGE;
int calculate(char *ifn);
int check_result(REAL *r, REAL *rtmp, int noden);
REAL compute_pr( REAL probability, REAL rtmp, int noden){
return ( ( ALPHA * probability ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp;
}
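/*
Worked example (sketch, with made-up numbers): compute_pr evaluates one
term of the damped PageRank update, ((ALPHA * p) + (1 - ALPHA) / n) * r_j.
With ALPHA = 0.85, noden = 4, probability p = 0.5 and rtmp = 1.0:
(0.85 * 0.5 + 0.15 / 4) * 1.0 = 0.425 + 0.0375 = 0.4625
Summing this term over all j (done below with __sec_reduce_add) gives the
new rank of node i.
*/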
int main(int argc,char * argv[])
{
char *ifn=NULL;
if(argc<2)
{
printf("wrong command format! usage:parallel_pagerank INPUTFILENAME\n");
return 0;
}
else
{
ifn=argv[1];
calculate(ifn);
return 0;
}
}
int calculate(char *ifn) {
begin_time = microtime();
FILE *ifp=NULL,*ofp=NULL;
EDGE edge,*array_edge=NULL;
char *ofn="CalculateResult.txt";
int noden,edgen,linen,begin,counter,edge_i;
REAL *r=NULL,*rtmp=NULL,*tmp,*array=NULL;
if((ifp=fopen(ifn,"r"))==NULL)
{
printf("%s file open error!\n",ifn);
exit(0);
}
else
{
printf("%s file opened success!\n",ifn);
}
if((ofp=fopen(ofn,"w"))==NULL)
{
printf("%s file open error!\n",ofn);
fclose(ifp);
exit(0);
}
else
{
printf("%s file opened success!\n",ofn);
}
fscanf(ifp,"%d%d",&noden,&edgen);
printf("Allocating Memory!\n");
if((array_edge=(EDGE *)malloc(edgen*sizeof(EDGE)))==NULL) {
printf("Memory alloc ERROR !\n");
fclose(ifp);
fclose(ofp);
exit(0);
}
linen=ARRAY_LENGTH/noden;
if(linen<=0) {
printf("ArrayLength is too short for this caculate!\nPlase change the value of ARRAYLENGTH\n");
free(array_edge);
fclose(ifp);
fclose(ofp);
exit(0);
}
if((array=(REAL *)malloc(linen*noden*sizeof(REAL)))==NULL) {
printf("Memory alloc ERROR !\n");
fclose(ifp);
fclose(ofp);
free(array_edge);
exit(0);
}
if((r=(REAL *)malloc(noden*sizeof(REAL)))==NULL) {
printf("Memory alloc ERROR !\n");
fclose(ifp);
fclose(ofp);
free(array);
exit(0);
}
if((rtmp=(REAL *)malloc(noden*sizeof(REAL)))==NULL) {
printf("Memory alloc ERROR !\n");
fclose(ifp);
fclose(ofp);
free(array);
free(r);
exit(0);
}
REAL** array_tmp = &array;
#pragma simd
#pragma ivdep
#pragma vector always
cilk_for(int i=0;i<noden;i++) {
*(r+i)=1.0;
}
printf("Memory Alloc done!\n");
printf("Caculating pagerank!\n");
printf("Loding Data!\n");
for(int i=0;i<edgen;i++) {
fscanf(ifp,"%d%d%f",&((array_edge+i)->nodei),&((array_edge+i)->nodej),&((array_edge+i)->p));
}
printf("Data loaded!\n");
end_time = microtime();
printf("read file and alloc memory time consuming:%fs\n",end_time-begin_time);
begin_time = end_time;
printf("Begin Caculate!\n");
counter=MAX_TIMES;
begin_time = microtime();
if(noden<=linen) {
/*
end_time = microtime();
printf("read file and alloc memory time consuming:%fs\n",end_time-begin_time);
begin_time = end_time;
*/
//#pragma omp parallel for
#pragma simd
#pragma vector always
#pragma ivdep
cilk_for(int i=0;i<noden*noden;i++) {
array[i]=0;
}
/*
end_time = microtime();
parallel_time += end_time-begin_time;
begin_time = end_time;
*/
cilk_for(int i=0;i<edgen;i++) {
*(array+(((array_edge+i)->nodei)*noden+(array_edge+i)->nodej))=(array_edge+i)->p;
}
/*
end_time = microtime();
serial_time += end_time-begin_time;
begin_time= end_time;
*/
do {
tmp=rtmp;
rtmp=r;
r=tmp;
//caculate PageRank
cilk_for(int i=0;i<noden;i++) {
/*
#pragma simd
#pragma vector always
#pragma ivdep
for(int j=0;j<noden;j++) {
matrix_probablity = array[i*noden+j];
pr_tmp += ( ( ALPHA * matrix_probablity ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp[j];
}
*(r+i) = pr_tmp;
*/
r[i] = __sec_reduce_add ( compute_pr((array+i*noden)[0:noden], rtmp[0:noden] , noden) );
}
/*
end_time = microtime();
serial_time += end_time-begin_time;
begin_time = end_time;
*/
// printf("serial part time consuming:%fs\nparallel part time consuming:%fs\n",serial_time,parallel_time);
parallel_time = microtime()-begin_time;
printf(" parallel part time consuming %fs\n", parallel_time);
counter--;
printf("counter = %d ", counter);
printf(" first pagerank = %f, noden= %d linen= %d \n",r[0], noden, linen);
} while((!check_result(r,rtmp,noden)) && counter);
} else {
int block_counter=0;
do {
tmp=rtmp;
rtmp=r;
r=tmp;
begin=0;
edge_i=0;
block_counter=0;
/*
end_time = microtime();
serial_time += end_time-begin_time;
begin_time = end_time;
*/
for(int ii=0;ii<noden/linen;ii++) {
/*
end_time = microtime();
serial_time += end_time-begin_time;
begin_time = end_time;
*/
//#pragma omp parallel for
#pragma simd
#pragma vector always
#pragma ivdep
cilk_for(int i=0;i<linen*noden;i++) {
array[i]=0;
}
/*
end_time = microtime();
parallel_time += end_time - begin_time;
begin_time = end_time;
*/
do{
if((array_edge+edge_i)->nodei>=begin+linen) {
break;
} else {
*(array+(((array_edge+edge_i)->nodei%linen)*noden+(array_edge+edge_i)->nodej))=(array_edge+edge_i)->p;
edge_i++;
}
} while(edge_i<edgen);
//#pragma omp parallel for
cilk_for(int index_i=begin;index_i<begin+linen;index_i++) {
/*
#pragma simd
#pragma vector always
#pragma ivdep
for(int j=0;j<noden;j++) {
matrix_probablity = array[(index_i%linen)*noden+j];
pr_tmp += ( ( ALPHA * matrix_probablity ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp[j];
}
*(r+index_i) = pr_tmp;
*/
// r[index_i] = __sec_reduce_add ((array+(index_i%linen)*noden)[0:noden] * rtmp[0:noden] );
r[index_i] = __sec_reduce_add ( compute_pr ((array+(index_i%linen)*noden)[0:noden], rtmp[0:noden] , noden));
}
// r[begin:linen] = __sec_reduce_add ( array_tmp[0:linen][0:noden] * rtmp[0:noden] );
begin+=linen;
block_counter++;
if(block_counter%1000 == 0) printf("block_counter:%d\n",block_counter);
if(block_counter == 6000 || block_counter == (noden/linen - 1)){
/*
end_time = microtime();
serial_time += end_time - begin_time;
begin_time = end_time;
*/
// printf("block_counter:%d\nserial part time consuming:%fs\nparallel part time consuming:%fs\n",block_counter,serial_time,parallel_time);
printf("block_counter:%d\n parallel part time consuming:%fs\n",block_counter,microtime()-begin_time);
exit(0);
}
}
if(noden%linen != 0) {
/*
end_time = microtime();
serial_time += end_time - begin_time;
begin_time = end_time;
*/
//#pragma omp parallel for
cilk_for(int i=0;i<(noden%linen)*noden;i++) {
array[i]=0;
}
/*
end_time = microtime();
parallel_time += end_time - begin_time;
begin_time = end_time;
*/
do{
if((array_edge+edge_i)->nodei>=begin+linen) {
break;
} else {
*(array+(((array_edge+edge_i)->nodei%linen)*noden+(array_edge+edge_i)->nodej))=(array_edge+edge_i)->p;
edge_i++;
}
} while(edge_i<edgen);
//#pragma omp parallel for
cilk_for(int index_i=begin;index_i<noden;index_i++) {
/*
//#pragma omp parallel for reduction(+:pr_tmp)
#pragma simd
#pragma vector always
#pragma ivdep
for(int j=0;j<noden;j++) {
matrix_probablity = array[(index_i%linen)*noden+j];
pr_tmp += ( ( ALPHA * matrix_probablity ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp[j];
}
end_time = microtime();
parallel_time += end_time - begin_time;
begin_time = end_time;
*(r+index_i) = pr_tmp;
*/
r[index_i] = __sec_reduce_add ( compute_pr ((array+(index_i%linen)*noden)[0:noden], rtmp[0:noden] , noden));
}
block_counter++;
if(block_counter%100 == 0) printf("block_counter=%d begin=%d\n",block_counter,begin);
}
counter--;
printf("counter = %d ", counter);
printf(" first pagerank = %f, noden= %d linen= %d \n",r[0], noden, linen);
} while((!check_result(r,rtmp,noden)) && counter);
}
printf("caculate done !\n");
printf("outputing result to %s\n",ofn);
for(int i=0;i<noden;i++) {
fprintf(ofp,"%d\t%f\n",i,*(r+i));
}
printf("output done!,counter times:%d\n",counter);
fclose(ifp);
fclose(ofp);
free(array);
free(array_edge);
free(rtmp);
free(r);
return 0;
}
int check_result(REAL *r, REAL *rtmp, int noden) {
for(int i=0;i<noden;i++) {
if(fabs(*(r+i)-*(rtmp+i))>=EPSILON) {
return 0;
}
}
return 1;
}
|
image_random-inl.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.
template<typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu> *s,
const T1 input,
const T2 output,
const int req,
const float normalize_factor);
template<typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu> *s,
const DType *input,
DType *output,
const int req,
const int N,
const int C,
const int H,
const int W,
const float mean_d0,
const float mean_d1,
const float mean_d2,
const float std_d0,
const float std_d1,
const float std_d2);
template<typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s,
const DType *out_grad,
DType *in_grad,
const int req,
const int N,
const int C,
const int H,
const int W,
const float std_d0,
const float std_d1,
const float std_d2);
#endif // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TShape &shp = (*in_attrs)[0];
if (!shp.ndim()) return false;
CHECK((shp.ndim() == 3) || (shp.ndim() == 4))
<< "Input image must have shape (height, width, channels), or "
<< "(N, height, width, channels) but got " << shp;
if (shp.ndim() == 3) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, TShape({shp[2], shp[0], shp[1]}));
} else if (shp.ndim() == 4) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, TShape({shp[0], shp[3], shp[1], shp[2]}));
}
return true;
}
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
return (*in_attrs)[0] != -1;
}
// Operator Implementation
template<typename DType, int req>
inline void ToTensor(float* out_data, const DType* in_data,
const int length,
const int channels,
const float normalize_factor,
const int step) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
KERNEL_ASSIGN(out_data[step + c*length + i], req,
(in_data[step + i*channels + c]) / normalize_factor);
}
}
}
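// Layout note (sketch): ToTensor converts interleaved HWC input to planar
// CHW float output, scaling by 1/normalize_factor. For a 2x2 RGB image
// (length = 4, channels = 3, step = 0), the G value of pixel i = 1 sits at
// in_data[1*3 + 1] = in_data[4] and lands at out_data[1*4 + 1] = out_data[5],
// i.e. out[c*length + i] = in[i*channels + c] / normalize_factor.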
inline void ToTensorImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channel,
const float normalize_factor,
const int step) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
float* output = outputs[0].dptr<float>();
DType* input = inputs[0].dptr<DType>();
ToTensor<DType, req_type>(output, input, length, channel,
normalize_factor, step);
});
});
}
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
// We do not use a temp buffer when performing the operation.
// Hence, this check is necessary.
CHECK_EQ(req[0], kWriteTo)
<< "`to_tensor` does not support inplace updates";
const float normalize_factor = 255.0f;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>
(s, input, output, req_type, normalize_factor);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>
(s, input, output, req_type, normalize_factor);
}
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
// 3D Input - (h, w, c)
const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
const int channel = static_cast<int>(inputs[0].shape_[2]);
const int step = 0;
ToTensorImpl(inputs, outputs, req, length,
channel, normalize_factor, step);
} else if (inputs[0].ndim() == 4) {
// 4D input (n, h, w, c)
const int batch_size = inputs[0].shape_[0];
const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
const int channel = static_cast<int>(inputs[0].shape_[3]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
ToTensorImpl(inputs, outputs, req, length, channel,
normalize_factor, n*step);
}
}
}
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
nnvm::Tuple<float> mean;
nnvm::Tuple<float> std;
DMLC_DECLARE_PARAMETER(NormalizeParam) {
DMLC_DECLARE_FIELD(mean)
.set_default(nnvm::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
.describe("Sequence of means for each channel. "
"Default value is 0.");
DMLC_DECLARE_FIELD(std)
.set_default(nnvm::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
.describe("Sequence of standard deviations for each channel. "
"Default value is 1.");
}
};
// Shape and Type inference for image Normalize operator
// Shape inference
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
const auto& dshape = (*in_attrs)[0];
if (!dshape.ndim()) return false;
CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
<< "Input tensor must have shape (channels, height, width), or "
<< "(N, channels, height, width), but got " << dshape;
uint32_t nchannels;
if (dshape.ndim() == 3) {
nchannels = dshape[0];
CHECK(nchannels == 3 || nchannels == 1)
<< "The first dimension of input tensor must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
} else if (dshape.ndim() == 4) {
nchannels = dshape[1];
CHECK(nchannels == 3 || nchannels == 1)
<< "The second dimension of input tensor must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
}
CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
<< "Invalid mean for input with shape " << dshape
<< ". mean must have either 1 or " << nchannels
<< " elements, but got " << param.mean;
CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
<< "Invalid std for input with shape " << dshape
<< ". std must have either 1 or " << nchannels
<< " elements, but got " << param.std;
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
// Type Inference
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
template<typename DType, int req>
inline void Normalize(DType* out_data,
const DType* in_data,
const int length,
const int channels,
const int step,
const std::vector<float> mean,
const std::vector<float> std) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
KERNEL_ASSIGN(out_data[step + c*length + i], req,
(in_data[step + c*length + i] - mean[c]) / std[c]);
}
}
}
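// Formula note (sketch): for channel c, Normalize computes
// out[c][i] = (in[c][i] - mean[c]) / std[c].
// E.g. with in = 0.5, mean[c] = 0.485, std[c] = 0.229 (typical ImageNet
// constants, used here purely as an illustration):
// (0.5 - 0.485) / 0.229 ~= 0.0655.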
inline void NormalizeImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channels,
const int step,
const std::vector<float> mean,
const std::vector<float> std) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
Normalize<DType, req_type>(output, input, length, channels, step,
mean, std);
});
});
}
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
// Mean and Std can be 1 or 3D only.
std::vector<float> mean(3);
std::vector<float> std(3);
if (param.mean.ndim() == 1) {
mean[0] = mean[1] = mean[2] = param.mean[0];
} else {
mean[0] = param.mean[0];
mean[1] = param.mean[1];
mean[2] = param.mean[2];
}
if (param.std.ndim() == 1) {
std[0] = std[1] = std[2] = param.std[0];
} else {
std[0] = param.std[0];
std[1] = param.std[1];
std[2] = param.std[2];
}
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
int N, C, H, W;
DType *input = nullptr;
DType *output = nullptr;
if (inputs[0].ndim() == 3) {
N = 1;
C = static_cast<int>(inputs[0].shape_[0]);
H = static_cast<int>(inputs[0].shape_[1]);
W = static_cast<int>(inputs[0].shape_[2]);
input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
} else {
N = static_cast<int>(inputs[0].shape_[0]);
C = static_cast<int>(inputs[0].shape_[1]);
H = static_cast<int>(inputs[0].shape_[2]);
W = static_cast<int>(inputs[0].shape_[3]);
input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
}
NormalizeImplCUDA<DType>(s, input, output, req_type,
N, C, H, W,
mean[0], mean[1], mean[2],
std[0], std[1], std[2]);
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
// 3D input (c, h, w)
const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
const int channel = static_cast<int>(inputs[0].shape_[0]);
const int step = 0;
NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
} else if (inputs[0].ndim() == 4) {
// 4D input (n, c, h, w)
const int batch_size = inputs[0].shape_[0];
const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
const int channel = static_cast<int>(inputs[0].shape_[1]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std);
}
}
}
// Backward function
template<typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
DType* in_grad,
const int length,
const int channels,
const int step,
const std::vector<float> std) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
KERNEL_ASSIGN(in_grad[step + c*length + i], req,
out_grad[step + c*length + i] * (1.0 / std[c]));
}
}
}
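// Gradient note (sketch): Normalize computes out = (in - mean[c]) / std[c],
// which is affine in the input, so d(out)/d(in) = 1 / std[c]. The backward
// pass therefore just rescales the incoming gradient per channel,
// in_grad = out_grad * (1 / std[c]); the mean term drops out entirely.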
inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channels,
const int step,
const std::vector<float> std
) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
DType* out_grad = inputs[0].dptr<DType>();
DType* in_grad = outputs[0].dptr<DType>();
NormalizeBackward<DType, req_type>(out_grad, in_grad, length,
channels, step, std);
});
});
}
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
// Std can be 1 or 3D only.
std::vector<float> std(3);
if (param.std.ndim() == 1) {
std[0] = std[1] = std[2] = param.std[0];
} else {
std[0] = param.std[0];
std[1] = param.std[1];
std[2] = param.std[2];
}
// Note: inputs[0] is out_grad
const TBlob& in_data = inputs[1];
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
int N, C, H, W;
DType *in_grad = nullptr;
DType *out_grad = nullptr;
if (in_data.ndim() == 3) {
N = 1;
C = static_cast<int>(in_data.shape_[0]);
H = static_cast<int>(in_data.shape_[1]);
W = static_cast<int>(in_data.shape_[2]);
out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
} else {
N = static_cast<int>(in_data.shape_[0]);
C = static_cast<int>(in_data.shape_[1]);
H = static_cast<int>(in_data.shape_[2]);
W = static_cast<int>(in_data.shape_[3]);
out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
}
NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
N, C, H, W,
std[0], std[1], std[2]);
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (in_data.ndim() == 3) {
// 3D input (c, h, w)
const int length = in_data.shape_[1] * in_data.shape_[2];
const int channel = static_cast<int>(in_data.shape_[0]);
const int step = 0;
NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
} else if (in_data.ndim() == 4) {
// 4D input (n, c, h, w)
const int batch_size = in_data.shape_[0];
const int length = in_data.shape_[2] * in_data.shape_[3];
const int channel = static_cast<int>(in_data.shape_[1]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std);
}
}
}
template<typename DType>
inline DType saturate_cast(const float& src) {
return static_cast<DType>(src);
}
template<>
inline uint8_t saturate_cast(const float& src) {
return std::min(std::max(src, 0.f), 255.f);
}
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
TShape& dshape = (*in_attrs)[0];
CHECK_EQ(dshape.ndim(), 3)
<< "Input image must have shape (height, width, channels), but got " << dshape;
auto nchannels = dshape[dshape.ndim()-1];
CHECK(nchannels == 3 || nchannels == 1)
<< "The last dimension of input image must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
template<typename DType, int axis>
void FlipImpl(const TShape &shape, DType *src, DType *dst) {
int head = 1, mid = shape[axis], tail = 1;
for (int i = 0; i < axis; ++i) head *= shape[i];
for (uint32_t i = axis+1; i < shape.ndim(); ++i) tail *= shape[i];
for (int i = 0; i < head; ++i) {
for (int j = 0; j < (mid >> 1); ++j) {
int idx1 = (i*mid + j) * tail;
int idx2 = idx1 + (mid-(j << 1)-1) * tail;
for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
DType tmp = src[idx1];
dst[idx1] = src[idx2];
dst[idx2] = tmp;
}
}
}
}
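// Index note (sketch): FlipImpl views the tensor as (head, mid, tail) with
// mid = shape[axis], and swaps element j with mid-1-j along that axis.
// For axis = 1 on an (H, W, C) image: head = H, tail = C, and
// idx2 = idx1 + (mid - 2*j - 1) * tail = (i*mid + (mid-1-j)) * tail,
// i.e. column j is exchanged with column W-1-j -- a left-right flip.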
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
});
}
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
});
}
inline void RandomFlipLeftRight(
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
if (outputs[0].dptr_ != inputs[0].dptr_) {
std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
}
} else {
FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
}
});
}
inline void RandomFlipTopBottom(
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
if (outputs[0].dptr_ != inputs[0].dptr_) {
std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
}
} else {
FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
}
});
}
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
float min_factor;
float max_factor;
DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
DMLC_DECLARE_FIELD(min_factor)
.set_lower_bound(0.0)
.describe("Minimum factor.");
DMLC_DECLARE_FIELD(max_factor)
.set_lower_bound(0.0)
.describe("Maximum factor.");
}
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
float alpha_b = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs);
}
inline void AdjustContrastImpl(const float& alpha_c,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
static const float coef[] = { 0.299f, 0.587f, 0.114f };
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
float sum = 0.f;
if (nchannels > 1) {
for (int l = 0; l < length; ++l) {
for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
}
} else {
for (int l = 0; l < length; ++l) sum += input[l];
}
float gray_mean = sum / static_cast<float>(length);
float beta = (1 - alpha_c) * gray_mean;
for (int l = 0; l < length * nchannels; ++l) {
float val = input[l] * alpha_c + beta;
output[l] = saturate_cast<DType>(val);
}
});
}
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
float alpha_c = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs);
}
inline void AdjustSaturationImpl(const float& alpha_s,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
static const float coef[] = { 0.299f, 0.587f, 0.114f };
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
float alpha_o = 1.f - alpha_s;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
if (nchannels == 1) {
for (int l = 0; l < length; ++l) output[l] = input[l];
return;
}
for (int l = 0; l < length; ++l) {
float gray = 0.f;
for (int c = 0; c < 3; ++c) {
gray += input[l*3 + c] * coef[c];
}
gray *= alpha_o;
for (int c = 0; c < 3; ++c) {
float val = gray + input[l*3 + c] * alpha_s;
output[l*3 + c] = saturate_cast<DType>(val);
}
}
});
}
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
float alpha_s = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs);
}
inline void RGB2HLSConvert(const float& src_r,
const float& src_g,
const float& src_b,
float *dst_h,
float *dst_l,
float *dst_s) {
float b = src_b / 255.f, g = src_g / 255.f, r = src_r / 255.f;
float h = 0.f, s = 0.f, l;
float vmin;
float vmax;
float diff;
vmax = vmin = r;
vmax = std::fmax(vmax, g);
vmax = std::fmax(vmax, b);
vmin = std::fmin(vmin, g);
vmin = std::fmin(vmin, b);
diff = vmax - vmin;
l = (vmax + vmin) * 0.5f;
if (diff > std::numeric_limits<float>::epsilon()) {
s = (l < 0.5f) * diff / (vmax + vmin);
s += (l >= 0.5f) * diff / (2.0f - vmax - vmin);
diff = 60.f / diff;
h = (vmax == r) * (g - b) * diff;
h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f);
h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f);
h += (h < 0.f) * 360.f;
}
*dst_h = h;
*dst_l = l;
*dst_s = s;
}
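// Worked example (sketch): pure red, (src_r, src_g, src_b) = (255, 0, 0):
// r = 1, g = b = 0, so vmax = 1, vmin = 0, diff = 1 and l = 0.5. Since
// l >= 0.5, s = diff / (2 - vmax - vmin) = 1; diff becomes 60 / 1 = 60 and
// vmax == r gives h = (g - b) * 60 = 0. Result: (H, L, S) = (0, 0.5, 1).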
inline void HLS2RGBConvert(const float& src_h,
const float& src_l,
const float& src_s,
float *dst_r,
float *dst_g,
float *dst_b) {
static const int c_HlsSectorData[6][3] = {
{ 1, 3, 0 },
{ 1, 0, 2 },
{ 3, 0, 1 },
{ 0, 2, 1 },
{ 0, 1, 3 },
{ 2, 1, 0 }
};
float h = src_h, l = src_l, s = src_s;
float b = l, g = l, r = l;
if (s != 0) {
float p2 = (l <= 0.5f) * l * (1 + s);
p2 += (l > 0.5f) * (l + s - l * s);
float p1 = 2 * l - p2;
h *= 1.f / 60.f;
if (h < 0) {
do { h += 6; } while (h < 0);
} else if (h >= 6) {
do { h -= 6; } while (h >= 6);
}
int sector = static_cast<int>(h);
h -= sector;
float tab[4];
tab[0] = p2;
tab[1] = p1;
tab[2] = p1 + (p2 - p1) * (1 - h);
tab[3] = p1 + (p2 - p1) * h;
b = tab[c_HlsSectorData[sector][0]];
g = tab[c_HlsSectorData[sector][1]];
r = tab[c_HlsSectorData[sector][2]];
}
*dst_b = b * 255.f;
*dst_g = g * 255.f;
*dst_r = r * 255.f;
}
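// Round-trip check (sketch): feeding (H, L, S) = (0, 0.5, 1) back in:
// s != 0, so p2 = l * (1 + s) = 1 and p1 = 2*l - p2 = 0; h / 60 = 0 picks
// sector 0 with h = 0, so tab = {1, 0, 1, 0}. c_HlsSectorData[0] = {1, 3, 0}
// selects b = tab[1] = 0, g = tab[3] = 0, r = tab[0] = 1, and scaling by
// 255 returns (255, 0, 0) -- the original red, so the two converters are
// mutual inverses on this input.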
inline void AdjustHueImpl(float alpha,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
if (inputs[0].shape_[2] == 1) return;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
for (int i = 0; i < length; ++i) {
float h, l, s;
float r = static_cast<float>(*(input++));
float g = static_cast<float>(*(input++));
float b = static_cast<float>(*(input++));
RGB2HLSConvert(r, g, b, &h, &l, &s);
h += alpha * 360.f;
HLS2RGBConvert(h, l, s, &r, &g, &b);
*(output++) = saturate_cast<DType>(r);
*(output++) = saturate_cast<DType>(g);
*(output++) = saturate_cast<DType>(b);
}
});
}
inline void RandomHue(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
float alpha = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustHueImpl(alpha, ctx, inputs, req, outputs);
}
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
float brightness;
float contrast;
float saturation;
float hue;
DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
DMLC_DECLARE_FIELD(brightness)
.describe("How much to jitter brightness.");
DMLC_DECLARE_FIELD(contrast)
.describe("How much to jitter contrast.");
DMLC_DECLARE_FIELD(saturation)
.describe("How much to jitter saturation.");
DMLC_DECLARE_FIELD(hue)
.describe("How much to jitter hue.");
}
};
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
int order[4] = {0, 1, 2, 3};
std::shuffle(order, order + 4, prnd->GetRndEngine());
bool flag = false;
for (int i = 0; i < 4; ++i) {
switch (order[i]) {
case 0:
if (param.brightness > 0) {
float alpha_b = 1.0 + std::uniform_real_distribution<float>(
-param.brightness, param.brightness)(prnd->GetRndEngine());
AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 1:
if (param.contrast > 0) {
float alpha_c = 1.0 + std::uniform_real_distribution<float>(
-param.contrast, param.contrast)(prnd->GetRndEngine());
AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 2:
if (param.saturation > 0) {
float alpha_s = 1.f + std::uniform_real_distribution<float>(
-param.saturation, param.saturation)(prnd->GetRndEngine());
AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 3:
if (param.hue > 0) {
float alpha_h = std::uniform_real_distribution<float>(
-param.hue, param.hue)(prnd->GetRndEngine());
AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
}
}
}
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
nnvm::Tuple<float> alpha;
DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
DMLC_DECLARE_FIELD(alpha)
.describe("The lighting alphas for the R, G, B channels.");
}
};
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
float alpha_std;
DMLC_DECLARE_PARAMETER(RandomLightingParam) {
DMLC_DECLARE_FIELD(alpha_std)
.set_default(0.05)
.describe("Level of the lighting noise.");
}
};
inline void AdjustLightingImpl(const nnvm::Tuple<float>& alpha,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
static const float eig[3][3] = {
{ 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 },
{ 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
{ 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
};
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int channels = inputs[0].shape_[2];
if (channels == 1) return;
float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int i = 0; i < length; i++) {
int base_ind = 3 * i;
float in_r = static_cast<float>(input[base_ind]);
float in_g = static_cast<float>(input[base_ind + 1]);
float in_b = static_cast<float>(input[base_ind + 2]);
output[base_ind] = saturate_cast<DType>(in_r + pca_r);
output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
}
});
}
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const AdjustLightingParam ¶m = nnvm::get<AdjustLightingParam>(attrs.parsed);
AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs);
}
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomLightingParam ¶m = nnvm::get<RandomLightingParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
std::normal_distribution<float> dist(0, param.alpha_std);
float alpha_r = dist(prnd->GetRndEngine());
float alpha_g = dist(prnd->GetRndEngine());
float alpha_b = dist(prnd->GetRndEngine());
AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs);
}
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ImageShape) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
.add_argument("data", "NDArray-or-Symbol", "The input.")
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
MXNET_REGISTER_IMAGE_AUG_OP(name) \
.set_attr<FResourceRequest>("FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
})
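// A hypothetical use of the macros above (a sketch only -- the real
// registrations live in the corresponding .cc file, and the op name
// `_image_random_hue` is illustrative):
//
//   DMLC_REGISTER_PARAMETER(RandomEnhanceParam);
//   MXNET_REGISTER_IMAGE_RND_AUG_OP(_image_random_hue)
//   .set_attr<FCompute>("FCompute<cpu>", RandomHue)
//   .add_arguments(RandomEnhanceParam::__FIELDS__());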
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
bml_allocate_ellpack_typed.c
|
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_types.h"
#include "bml_allocate_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Clear a matrix.
*
 * Numbers of non-zeroes, indices, and values are set to zero.
*
* \ingroup allocate_group
*
* \param A The matrix.
*/
void TYPED_FUNC(
bml_clear_ellpack) (
bml_matrix_ellpack_t * A)
{
REAL_T *A_value = A->value;
#if defined (USE_OMP_OFFLOAD)
int *A_index = A->index;
int *A_nnz = A->nnz;
int N = A->N;
int M = A->M;
#pragma omp target teams distribute parallel for
for (int i = 0; i < N; i++)
{
A_nnz[i] = 0;
}
#pragma omp target teams distribute parallel for collapse(2) schedule (static, 1)
for (int i = 0; i < N; i++)
{
for (int j = 0; j < M; j++)
{
A_index[ROWMAJOR(i, j, N, M)] = 0;
A_value[ROWMAJOR(i, j, N, M)] = 0.0;
}
}
#else // conditional for offload
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
for (int i = 0; i < (A->N * A->M); i++)
{
__assume_aligned(A->index, MALLOC_ALIGNMENT);
__assume_aligned(A_value, MALLOC_ALIGNMENT);
A->index[i] = 0;
A_value[i] = 0.0;
}
#pragma omp parallel for simd
#pragma vector aligned
for (int i = 0; i < A->N; i++)
{
__assume_aligned(A->nnz, MALLOC_ALIGNMENT);
A->nnz[i] = 0;
}
#else
memset(A->nnz, 0, A->N * sizeof(int));
memset(A->index, 0, A->N * A->M * sizeof(int));
    memset(A->value, 0, A->N * A->M * sizeof(REAL_T));
#endif
#endif // conditional for offload
}
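/* ELLPACK layout sketch (inferred from the ROWMAJOR(i, j, N, M) accesses
 * above): each of the N rows owns M slots, so row i's j-th stored entry
 * lives at offset i * M + j in both A->index (column ids) and A->value
 * (numerical entries), while A->nnz[i] records how many of row i's M
 * slots are actually in use. */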
/** Allocate a matrix with uninitialized values.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param matrix_dimension The matrix size.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t
* TYPED_FUNC(bml_noinit_matrix_ellpack) (bml_matrix_dimension_t
matrix_dimension,
bml_distribution_mode_t
distrib_mode)
{
bml_matrix_ellpack_t *A =
bml_noinit_allocate_memory(sizeof(bml_matrix_ellpack_t));
A->matrix_type = ellpack;
A->matrix_precision = MATRIX_PRECISION;
A->N = matrix_dimension.N_rows;
A->M = matrix_dimension.N_nz_max;
A->distribution_mode = distrib_mode;
A->index = bml_noinit_allocate_memory(sizeof(int) * A->N * A->M);
A->nnz = bml_allocate_memory(sizeof(int) * A->N);
A->value = bml_noinit_allocate_memory(sizeof(REAL_T) * A->N * A->M);
A->domain = bml_default_domain(A->N, A->M, distrib_mode);
A->domain2 = bml_default_domain(A->N, A->M, distrib_mode);
#if defined(USE_OMP_OFFLOAD)
int N = A->N;
int M = A->M;
int *A_index = A->index;
int *A_nnz = A->nnz;
REAL_T *A_value = A->value;
#pragma omp target enter data map(alloc:A_value[:N*M], A_index[:N*M], A_nnz[:N])
#pragma omp target update to(A_value[:N*M], A_index[:N*M], A_nnz[:N])
#endif
return A;
}
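/* Minimal usage sketch for the allocator above (the distribution-mode
 * value `sequential` is illustrative; any dimension fields beyond the two
 * read above are zero-initialized):
 *
 *     bml_matrix_dimension_t dim = { 0 };
 *     dim.N_rows = 1024;
 *     dim.N_nz_max = 32;
 *     bml_matrix_ellpack_t *A =
 *         TYPED_FUNC(bml_noinit_matrix_ellpack) (dim, sequential);
 */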
/** Allocate the zero matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_zero_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
assert(M > 0);
bml_matrix_ellpack_t *A =
bml_allocate_memory(sizeof(bml_matrix_ellpack_t));
A->matrix_type = ellpack;
A->matrix_precision = MATRIX_PRECISION;
A->N = N;
A->M = M;
A->distribution_mode = distrib_mode;
    // need to keep these allocations for the host copy
A->index = bml_allocate_memory(sizeof(int) * N * M);
A->nnz = bml_allocate_memory(sizeof(int) * N);
A->value = bml_allocate_memory(sizeof(REAL_T) * N * M);
REAL_T *A_value = A->value;
A->domain = bml_default_domain(N, M, distrib_mode);
A->domain2 = bml_default_domain(N, M, distrib_mode);
#if defined(USE_OMP_OFFLOAD)
int *A_nnz = A->nnz;
int *A_index = A->index;
int NM = N * M;
#pragma omp target enter data map(alloc:A_value[:N*M], A_index[:N*M], A_nnz[:N])
#pragma omp target teams distribute parallel for schedule (static, 1)
for (int i = 0; i < N; i++)
{
A_nnz[i] = 0;
}
#pragma omp target teams distribute parallel for collapse(2) schedule (static, 1)
for (int i = 0; i < N; i++)
{
for (int j = 0; j < M; j++)
{
A_index[ROWMAJOR(i, j, N, M)] = 0;
A_value[ROWMAJOR(i, j, N, M)] = 0.0;
}
}
#endif
return A;
}
/** Allocate a banded random matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_banded_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
REAL_T *A_value = A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
const REAL_T INV_RAND_MAX = 1.0 / (REAL_T) RAND_MAX;
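    // Each row i stores the clipped band [max(i - M/2, 0), min(i - M/2 + M, N)),
    // so interior rows hold exactly M entries and rows near the edges fewer.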
#pragma omp parallel for shared(A_value, A_index, A_nnz)
for (int i = 0; i < N; i++)
{
int jind = 0;
for (int j = (i - M / 2 >= 0 ? i - M / 2 : 0);
j < (i - M / 2 + M <= N ? i - M / 2 + M : N); j++)
{
A_value[ROWMAJOR(i, jind, N, M)] = rand() * INV_RAND_MAX;
A_index[ROWMAJOR(i, jind, N, M)] = j;
jind++;
}
A_nnz[i] = jind;
}
#if defined(USE_OMP_OFFLOAD)
#pragma omp target update to(A_value[:N*M], A_index[:N*M], A_nnz[:N])
#endif
return A;
}
/** Allocate a random matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*
 * Note: Do not use OpenMP when setting values for a random matrix;
 * filling the rows concurrently makes the sequence of rand() calls, and
 * hence the matrix, non-repeatable.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_random_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
REAL_T *A_value = A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
const REAL_T INV_RAND_MAX = 1.0 / (REAL_T) RAND_MAX;
for (int i = 0; i < N; i++)
{
int jind = 0;
for (int j = 0; j < M; j++)
{
A_value[ROWMAJOR(i, jind, N, M)] = rand() * INV_RAND_MAX;
A_index[ROWMAJOR(i, jind, N, M)] = j;
jind++;
}
A_nnz[i] = jind;
}
#if defined(USE_OMP_OFFLOAD)
#pragma omp target update to(A_value[:N*M], A_index[:N*M], A_nnz[:N])
#endif
return A;
}
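/* A repeatable alternative, sketched here for illustration only (hence
 * the #if 0): seeding a per-row rand_r() state from the row index makes
 * the generated values independent of thread scheduling, so OpenMP could
 * be used without losing repeatability. Note that rand_r() is POSIX, not
 * ISO C, and this helper is not part of the bml API. */
#if 0
static void TYPED_FUNC(
    bml_fill_random_row_sketch) (
    REAL_T * row_values,
    int M,
    unsigned int seed)
{
    const REAL_T INV_RAND_MAX = 1.0 / (REAL_T) RAND_MAX;
    for (int j = 0; j < M; j++)
    {
        row_values[j] = rand_r(&seed) * INV_RAND_MAX;
    }
}
#endif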
/** Allocate the identity matrix.
*
* Note that the matrix \f$ a \f$ will be newly allocated. If it is
* already allocated then the matrix will be deallocated in the
* process.
*
* \ingroup allocate_group
*
* \param matrix_precision The precision of the matrix. The default
* is double precision.
* \param N The matrix size.
* \param M The number of non-zeroes per row.
* \param distrib_mode The distribution mode.
* \return The matrix.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_identity_matrix_ellpack) (
int N,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
REAL_T *A_value = A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
#pragma omp parallel for shared(A_value, A_index, A_nnz)
for (int i = 0; i < N; i++)
{
#ifdef INTEL_OPT
__assume_aligned(A_value, MALLOC_ALIGNMENT);
__assume_aligned(A_index, MALLOC_ALIGNMENT);
__assume_aligned(A_nnz, MALLOC_ALIGNMENT);
#endif
A_value[ROWMAJOR(i, 0, N, M)] = (REAL_T) 1.0;
A_index[ROWMAJOR(i, 0, N, M)] = i;
A_nnz[i] = 1;
}
#if defined(USE_OMP_OFFLOAD)
#pragma omp target update to(A_value[:N*M], A_index[:N*M], A_nnz[:N])
#endif
return A;
}
|
find_most_influential.h
|
//===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <[email protected]>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_MPI_FIND_MOST_INFLUENTIAL_H
#define RIPPLES_MPI_FIND_MOST_INFLUENTIAL_H
#include "ripples/find_most_influential.h"
#include "ripples/streaming_find_most_influential.h"
#include "ripples/utility.h"
#if RIPPLES_ENABLE_CUDA
#include "ripples/cuda/cuda_utils.h"
#endif
#include "spdlog/spdlog.h"
namespace ripples {
template <typename GraphTy>
class MPIStreamingFindMostInfluential {
using vertex_type = typename GraphTy::vertex_type;
using worker_type = FindMostInfluentialWorker<GraphTy>;
using cpu_worker_type = CPUFindMostInfluentialWorker<GraphTy>;
#ifdef RIPPLES_ENABLE_CUDA
using gpu_worker_type = GPUFindMostInfluentialWorker<GraphTy>;
#endif
using rrr_set_iterator =
typename FindMostInfluentialWorker<GraphTy>::rrr_set_iterator;
CompareHeap<GraphTy> cmpHeap;
using priorityQueue =
std::priority_queue<std::pair<vertex_type, size_t>,
std::vector<std::pair<vertex_type, size_t>>,
decltype(cmpHeap)>;
public:
MPIStreamingFindMostInfluential(const GraphTy &G, RRRsets<GraphTy> &RRRsets,
size_t num_max_cpu, size_t num_gpus)
: num_cpu_workers_(num_max_cpu),
num_gpu_workers_(num_gpus),
workers_(),
vertex_coverage_(G.num_nodes(), 0),
reduced_vertex_coverage_(G.num_nodes(), 0),
queue_storage_(G.num_nodes()),
d_counters_(num_gpus, 0),
RRRsets_(RRRsets),
reduction_steps_(1),
d_cpu_counters_(nullptr) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#ifdef RIPPLES_ENABLE_CUDA
    // Get the number of devices and allocate one thread each.
// num_gpu_workers_ = cuda_num_devices();
num_cpu_workers_ -= num_gpu_workers_;
std::fill(vertex_coverage_.begin(), vertex_coverage_.end(), 0);
// Allocate Counters
if (num_gpu_workers_ > 0) {
#pragma omp parallel num_threads(num_gpu_workers_)
{
size_t rank = omp_get_thread_num();
cuda_set_device(rank);
cuda_malloc(reinterpret_cast<void **>(&d_counters_[rank]),
sizeof(uint32_t) * G.num_nodes());
if (rank == 0) {
cuda_malloc(reinterpret_cast<void **>(&d_cpu_counters_),
sizeof(uint32_t) * G.num_nodes());
cuda_malloc(reinterpret_cast<void **>(&d_cpu_reduced_counters_),
sizeof(uint32_t) * G.num_nodes());
}
}
}
#endif
workers_.push_back(new CPUFindMostInfluentialWorker<GraphTy>(
vertex_coverage_, queue_storage_, RRRsets_.begin(), RRRsets_.end(),
num_cpu_workers_, d_cpu_counters_));
#ifdef RIPPLES_ENABLE_CUDA
if (num_gpu_workers_ == 0) return;
// Define Reduction tree on GPU workers.
auto tree = cuda_get_reduction_tree();
// Construct GPU workers
for (size_t i = 0; i < num_gpu_workers_; ++i) {
reduction_steps_ = std::max(reduction_steps_, tree[i].second);
// std::cout << "step " << tree[i].second << " : " << i << " -> " <<
// tree[i].first << std::endl;
uint32_t *dest = i == 0 ? d_cpu_counters_ : d_counters_[tree[i].first];
workers_.push_back(new GPUFindMostInfluentialWorker<GraphTy>(
i, G.num_nodes(), d_counters_, tree[i].first, tree[i].second, dest));
}
#endif
}
~MPIStreamingFindMostInfluential() {
#ifdef RIPPLES_ENABLE_CUDA
for (auto b : d_counters_) {
cuda_free(b);
}
if (num_gpu_workers_ > 0) {
cuda_free(d_cpu_counters_);
cuda_free(d_cpu_reduced_counters_);
}
#endif
for (auto w : workers_) {
delete w;
}
}
void InitialCount() {
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
workers_[rank]->InitialCount();
}
}
void ReduceCounters() {
uint32_t *dest = reduced_vertex_coverage_.data();
uint32_t *src = vertex_coverage_.data();
#if RIPPLES_ENABLE_CUDA
if (num_gpu_workers_ != 0) {
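      // GPU path: reduce the per-device counters down the local reduction
      // tree, copy the device total to the host, MPI_Reduce across ranks,
      // then push the global result back to device 0 for CudaMaxElement.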
dest = d_cpu_reduced_counters_;
src = d_cpu_counters_;
cuda_memset(reinterpret_cast<void *>(src), 0,
sizeof(uint32_t) * vertex_coverage_.size());
for (ssize_t i = reduction_steps_; i >= 0; --i) {
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
workers_[rank]->ReduceCounters(i);
}
}
// std::cout << "Before Reduction " << src << std::endl;
std::vector<uint32_t> tmp(vertex_coverage_.size(), 0);
cuda_set_device(0);
cuda_memset(reinterpret_cast<void *>(dest), 0,
sizeof(uint32_t) * vertex_coverage_.size());
cuda_d2h(reinterpret_cast<void *>(tmp.data()),
reinterpret_cast<void *>(src),
sizeof(uint32_t) * vertex_coverage_.size());
// MPI_Reduce(src, dest, vertex_coverage_.size(),
// MPI_UINT32_T, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(tmp.data(), reduced_vertex_coverage_.data(),
vertex_coverage_.size(), MPI_UINT32_T, MPI_SUM, 0,
MPI_COMM_WORLD);
// if (mpi_rank == 0) {
// for (size_t i = 0; i < 10; ++i) {
// std::cout << "Reduce[" << i << "] = "<< reduced_vertex_coverage_[i]
// << std::endl;
// }
// }
// for (size_t i = 0; i < 10; ++i) {
// std::cout << "P[" << mpi_rank << "](" << i << ") = " << tmp[i]
// << std::endl;
// }
cuda_h2d(reinterpret_cast<void *>(dest),
reinterpret_cast<void *>(reduced_vertex_coverage_.data()),
sizeof(uint32_t) * vertex_coverage_.size());
} else
#endif
{
MPI_Reduce(src, dest, vertex_coverage_.size(), MPI_UINT32_T, MPI_SUM, 0,
MPI_COMM_WORLD);
}
}
void UpdateCounters(vertex_type last_seed) {
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
workers_[rank]->UpdateCounters(last_seed);
}
}
priorityQueue getHeap() {
priorityQueue queue(cmpHeap, std::move(queue_storage_));
return queue;
}
std::pair<vertex_type, size_t> getNextSeed(priorityQueue &queue_) {
ReduceCounters();
#ifdef RIPPLES_ENABLE_CUDA
if (num_gpu_workers_ != 0) {
uint32_t *global_counter = d_cpu_reduced_counters_;
if (mpi_rank == 0) {
cuda_set_device(0);
auto result = CudaMaxElement(global_counter, vertex_coverage_.size());
// std::cout << "Max Element " << result.first << " " << result.second
// << std::endl;
coveredAndSelected[0] += result.second;
coveredAndSelected[1] = result.first;
}
MPI_Bcast(&coveredAndSelected, 2, MPI_UINT32_T, 0, MPI_COMM_WORLD);
// std::cout << "$$$$ " << mpi_rank << " "<< coveredAndSelected[0] <<
// std::endl;
return std::pair<vertex_type, size_t>(coveredAndSelected[1],
coveredAndSelected[0]);
}
#endif
if (mpi_rank == 0) {
uint32_t vertex = 0;
uint32_t coverage = 0;
// auto itr = std::max_element(reduced_vertex_coverage_.begin(), reduced_vertex_coverage_.end());
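      // Parallel argmax over reduced_vertex_coverage_: each thread tracks
      // its local best (coverage, vertex) pair, and the critical section
      // keeps the global maximum.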
#pragma omp parallel
{
uint32_t vertex_local = 0;
uint32_t coverage_local = 0;
#pragma omp for
for (uint32_t i = 0; i < reduced_vertex_coverage_.size(); ++i) {
if (coverage_local < reduced_vertex_coverage_[i]) {
coverage_local = reduced_vertex_coverage_[i];
vertex_local = i;
}
}
#pragma omp critical
{
if (coverage < coverage_local) {
coverage = coverage_local;
vertex = vertex_local;
}
}
}
coveredAndSelected[0] += coverage;
coveredAndSelected[1] = vertex;
}
MPI_Bcast(&coveredAndSelected, 2, MPI_UINT32_T, 0, MPI_COMM_WORLD);
return std::pair<vertex_type, size_t>(coveredAndSelected[1],
coveredAndSelected[0]);
}
void LoadDataToDevice() {
if (num_gpu_workers_ == 0) return;
std::vector<PartitionIndices<rrr_set_iterator>> indices(num_gpu_workers_);
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
if (rank != 0) {
size_t threadnum = omp_get_thread_num() - 1,
numthreads = omp_get_num_threads() - 1;
size_t low = RRRsets_.size() * threadnum / numthreads,
high = RRRsets_.size() * (threadnum + 1) / numthreads;
indices[threadnum] = workers_[rank]->LoadData(
RRRsets_.begin() + low,
std::min(RRRsets_.end(), RRRsets_.begin() + high));
}
}
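    // Merge the per-GPU partition indices pairwise: pass j combines blocks
    // i and i + j, so after ceil(log2(num_threads)) passes indices[0]
    // covers the full RRR-set range.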
size_t num_threads = num_gpu_workers_;
for (size_t j = 1; j < num_threads; j <<= 1) {
#pragma omp parallel num_threads(num_threads >> j)
{
#pragma omp for schedule(dynamic)
for (size_t i = 0; i < (num_threads - j); i += j * 2) {
indices[i] = indices[i].mergeBlocks(indices[i + j],
std::min(2 * j, num_threads));
}
}
}
workers_[0]->set_first_rrr_set(indices[0].pivot);
}
auto find_most_influential_set(size_t k) {
omp_set_max_active_levels(2);
LoadDataToDevice();
InitialCount();
// std::cout << "Initial Count Done" << std::endl;
auto queue = getHeap();
std::vector<vertex_type> result;
result.reserve(k);
std::chrono::duration<double, std::milli> seedSelection(0);
while (true) {
// std::cout << "Get Seed" << std::endl;
auto start = std::chrono::high_resolution_clock::now();
auto element = getNextSeed(queue);
auto end = std::chrono::high_resolution_clock::now();
seedSelection += end - start;
result.push_back(element.first);
if (result.size() == k) break;
// std::cout << "Update counters" << std::endl;
// std::cout << *std::max_element(vertex_coverage_.begin(), vertex_coverage_.end()) << std::endl;
UpdateCounters(element.first);
// std::cout << "Done update counters" << std::endl;
// std::cout << *std::max_element(vertex_coverage_.begin(), vertex_coverage_.end()) << std::endl;
}
int world_size = 0;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
double f = double(coveredAndSelected[0]) / (world_size * RRRsets_.size());
// if (mpi_rank == 0) {
// std::cout << f << " = " << double(coveredAndSelected[0]) << "/ (" <<
// world_size << " * " <<
// RRRsets_.size() << ")" << std::endl;
// }
// double f = double(RRRsets_.size() - uncovered) / RRRsets_.size();
// std::cout << "#### " << seedSelection.count() << std::endl;
omp_set_max_active_levels(1);
return std::make_pair(f, result);
}
private:
size_t num_cpu_workers_, num_gpu_workers_;
ssize_t reduction_steps_;
RRRsets<GraphTy> &RRRsets_;
std::vector<worker_type *> workers_;
std::vector<uint32_t *> d_counters_;
uint32_t *d_cpu_counters_;
uint32_t *d_cpu_reduced_counters_;
std::vector<uint32_t> vertex_coverage_;
std::vector<uint32_t> reduced_vertex_coverage_;
std::vector<std::pair<vertex_type, size_t>> queue_storage_;
int mpi_rank;
uint32_t coveredAndSelected[2] = {0, 0};
};
//! \brief Select k seeds starting from a list of Random Reverse
//! Reachability Sets.
//!
//! \tparam GraphTy The graph type.
//! \tparam RRRset The type storing Random Reverse Reachability Sets.
//!
//! \param G The input graph.
//! \param k The size of the seed set.
//! \param RRRsets A vector of Random Reverse Reachability sets.
//! \param ex_tag The MPI+OpenMP execution tag.
//!
//! \return a pair of the fraction of RRR sets covered and the set of
//! vertices selected as seeds.
#if 0
template <typename GraphTy, typename RRRset>
auto FindMostInfluentialSet(const GraphTy &G, size_t k,
std::vector<RRRset> &RRRsets,
mpi_omp_parallel_tag &&ex_tag) {
using vertex_type = typename GraphTy::vertex_type;
std::vector<uint32_t> vertexCoverage(G.num_nodes(), 0);
std::vector<uint32_t> reduceCoverageInfo(G.num_nodes(), 0);
auto cmp = [](std::pair<vertex_type, uint32_t> &a,
std::pair<vertex_type, uint32_t> &b) {
return a.second < b.second;
};
using priorityQueue =
std::priority_queue<std::pair<vertex_type, uint32_t>,
std::vector<std::pair<vertex_type, uint32_t>>,
decltype(cmp)>;
MPI_Win win;
MPI_Win_create(reduceCoverageInfo.data(), G.num_nodes() * sizeof(uint32_t),
sizeof(uint32_t), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
CountOccurrencies(RRRsets.begin(), RRRsets.end(), vertexCoverage.begin(),
vertexCoverage.end(),
std::forward<omp_parallel_tag>(omp_parallel_tag{}));
MPI_Win_fence(0, win);
MPI_Accumulate(vertexCoverage.data(), G.num_nodes(), MPI_UINT32_T, 0, 0,
G.num_nodes(), MPI_UINT32_T, MPI_SUM, win);
MPI_Win_fence(0, win);
MPI_Win_free(&win);
std::vector<std::pair<vertex_type, uint32_t>> queue_storage;
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
queue_storage.resize(G.num_nodes());
InitHeapStorage(reduceCoverageInfo.begin(), reduceCoverageInfo.end(),
queue_storage.begin(), queue_storage.end(),
std::forward<omp_parallel_tag>(omp_parallel_tag{}));
}
priorityQueue queue(cmp, std::move(queue_storage));
std::vector<typename GraphTy::vertex_type> result;
result.reserve(k);
auto end = RRRsets.end();
uint32_t coveredAndSelected[2] = {0, 0};
while (result.size() < k) {
if (rank == 0) {
auto element = queue.top();
queue.pop();
if (element.second > reduceCoverageInfo[element.first]) {
element.second = reduceCoverageInfo[element.first];
queue.push(element);
continue;
}
coveredAndSelected[0] += element.second;
coveredAndSelected[1] = element.first;
}
MPI_Bcast(&coveredAndSelected, 2, MPI_UINT32_T, 0, MPI_COMM_WORLD);
vertex_type v = coveredAndSelected[1];
auto cmp = [=](const RRRset &a) -> auto {
return !std::binary_search(a.begin(), a.end(), v);
};
auto itr = partition(RRRsets.begin(), end, cmp, omp_parallel_tag{});
if (std::distance(itr, end) < std::distance(RRRsets.begin(), itr)) {
UpdateCounters(itr, end, vertexCoverage, omp_parallel_tag{});
} else {
#pragma omp parallel for simd
for (size_t i = 0; i < vertexCoverage.size(); ++i) vertexCoverage[i] = 0;
CountOccurrencies(RRRsets.begin(), itr, vertexCoverage.begin(),
vertexCoverage.end(), omp_parallel_tag{});
}
end = itr;
MPI_Reduce(vertexCoverage.data(), reduceCoverageInfo.data(), G.num_nodes(),
MPI_UINT32_T, MPI_SUM, 0, MPI_COMM_WORLD);
result.push_back(v);
}
int world_size = 0;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
double f = double(coveredAndSelected[0]) / (world_size * RRRsets.size());
std::cout << "Fraction covered " << f << std::endl;
return std::make_pair(f, result);
}
#endif
template <typename GraphTy, typename ConfTy, typename RRRset>
auto FindMostInfluentialSet(const GraphTy &G, const ConfTy &CFG,
std::vector<RRRset> &RRRsets, bool enableGPU,
mpi_omp_parallel_tag &&ex_tag) {
size_t num_gpu = 0;
size_t num_max_cpu = 0;
#pragma omp single
{
num_max_cpu =
std::min<size_t>(omp_get_max_threads(), CFG.seed_select_max_workers);
}
#ifdef RIPPLES_ENABLE_CUDA
if (enableGPU) {
num_gpu = std::min(cuda_num_devices(), CFG.seed_select_max_gpu_workers);
}
#endif
MPIStreamingFindMostInfluential<GraphTy> SE(G, RRRsets, num_max_cpu, num_gpu);
return SE.find_most_influential_set(CFG.k);
}
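// Minimal call sketch (hypothetical GraphTy/ConfTy instances; assumes MPI
// is initialized and RRRsets were sampled on every rank):
//
//   auto [f, seeds] = FindMostInfluentialSet(
//       G, CFG, RRRsets, /*enableGPU=*/false, mpi_omp_parallel_tag{});
//   // f is the estimated fraction of RRR sets covered by `seeds`.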
} // namespace ripples
#endif // RIPPLES_MPI_FIND_MOST_INFLUENTIAL_H
|
opencl_zip_fmt_plug.c
|
/*
*
* This software is Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com>
* with some code (c) 2012 Lukas Odzioba <[email protected]>
* and improvements (c) 2014 by magnum and JimF.
*
* This is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_zip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_zip);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "pkzip.h"
#include "dyna_salt.h"
#include "hmac_sha.h"
#include "options.h"
#include "stdint.h"
#define OPENCL_FORMAT 1
#include "pbkdf2_hmac_sha1.h"
#define FORMAT_LABEL "zip-opencl"
#define FORMAT_NAME "ZIP"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES"
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
# define SWAP(n) \
(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define BINARY_ALIGN MEM_ALIGN_NONE
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(my_salt*)
#define SALT_ALIGN 4
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} zip_password;
typedef struct {
uint32_t v[(2 * KEY_LENGTH(3) + PWD_VER_LENGTH + 3) / 4];
} zip_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint32_t skip_bytes;
uint8_t length;
uint8_t salt[64];
} zip_salt;
typedef struct my_salt_t {
dyna_salt dsalt;
uint32_t comp_len;
struct {
uint16_t type : 4;
uint16_t mode : 4;
} v;
unsigned char passverify[2];
unsigned char salt[SALT_LENGTH(3)];
//uint64_t data_key; // MSB of md5(data blob). We lookup using this.
unsigned char datablob[1];
} my_salt;
static my_salt *saved_salt;
static unsigned char (*crypt_key)[WINZIP_BINARY_SIZE];
static cl_int cl_error;
static zip_password *inbuffer;
static zip_hash *outbuffer;
static zip_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). It has to be included after the format definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(zip_password) * gws;
outsize = sizeof(zip_hash) * gws;
settingsize = sizeof(zip_salt);
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
crypt_key = mem_calloc(gws, sizeof(*crypt_key));
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
static void release_clobj(void)
{
if (crypt_key) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(crypt_key);
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
PLAINTEXT_LENGTH,
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(zip_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
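/*
 * Layout of the "$zip2$" input line, as reconstructed from the strtokm()
 * calls in get_salt() below (a sketch, not the authoritative pkzip spec):
 *
 *   $zip2$*type*mode*magic*salt(hex)*verifier(hex)*datalen(hex)*blob(hex)
 *
 * or, when the data lives in an external file,
 *
 *   $zip2$*type*mode*magic*salt*verifier*datalen*ZFILE*name*hdr-off*blob-off
 */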
static void *get_salt(char *ciphertext)
{
int i;
my_salt salt, *psalt;
static unsigned char *ptr;
/* extract data from "ciphertext" */
c8 *copy_mem = strdup(ciphertext);
c8 *cp, *p;
if (!ptr) ptr = mem_alloc_tiny(sizeof(my_salt*),sizeof(my_salt*));
p = copy_mem + WINZIP_TAG_LENGTH+1; /* skip over "$zip2$*" */
memset(&salt, 0, sizeof(salt));
cp = strtokm(p, "*"); // type
salt.v.type = atoi((const char*)cp);
cp = strtokm(NULL, "*"); // mode
salt.v.mode = atoi((const char*)cp);
cp = strtokm(NULL, "*"); // file_magic enum (ignored)
cp = strtokm(NULL, "*"); // salt
for (i = 0; i < SALT_LENGTH(salt.v.mode); i++)
salt.salt[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
cp = strtokm(NULL, "*"); // validator
salt.passverify[0] = (atoi16[ARCH_INDEX(cp[0])]<<4) | atoi16[ARCH_INDEX(cp[1])];
salt.passverify[1] = (atoi16[ARCH_INDEX(cp[2])]<<4) | atoi16[ARCH_INDEX(cp[3])];
cp = strtokm(NULL, "*"); // data len
sscanf((const char *)cp, "%x", &salt.comp_len);
// later we will store the data blob in our own static data structure, and place the 64 bit LSB of the
// MD5 of the data blob into a field in the salt. For the first POC I store the entire blob and just
// make sure all my test data is small enough to fit.
cp = strtokm(NULL, "*"); // data blob
// Ok, now create the allocated salt record we are going to return back to John, using the dynamic
// sized data buffer.
psalt = (my_salt*)mem_calloc(1, sizeof(my_salt) + salt.comp_len);
psalt->v.type = salt.v.type;
psalt->v.mode = salt.v.mode;
psalt->comp_len = salt.comp_len;
	psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with it.
memcpy(psalt->salt, salt.salt, sizeof(salt.salt));
psalt->passverify[0] = salt.passverify[0];
psalt->passverify[1] = salt.passverify[1];
// set the JtR core linkage stuff for this dyna_salt
psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(my_salt, comp_len);
psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(my_salt, comp_len, datablob, psalt->comp_len);
if (strcmp((const char*)cp, "ZFILE")) {
for (i = 0; i < psalt->comp_len; i++)
psalt->datablob[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
} else {
c8 *Fn, *Oh, *Ob;
long len;
uint32_t id;
FILE *fp;
Fn = strtokm(NULL, "*");
Oh = strtokm(NULL, "*");
Ob = strtokm(NULL, "*");
fp = fopen((const char*)Fn, "rb");
if (!fp) {
psalt->v.type = 1; // this will tell the format to 'skip' this salt, it is garbage
goto Bail;
}
sscanf((const char*)Oh, "%lx", &len);
if (fseek(fp, len, SEEK_SET)) {
fclose(fp);
psalt->v.type = 1;
goto Bail;
}
id = fget32LE(fp);
if (id != 0x04034b50U) {
fclose(fp);
psalt->v.type = 1;
goto Bail;
}
sscanf((const char*)Ob, "%lx", &len);
if (fseek(fp, len, SEEK_SET)) {
fclose(fp);
psalt->v.type = 1;
goto Bail;
}
if (fread(psalt->datablob, 1, psalt->comp_len, fp) != psalt->comp_len) {
fclose(fp);
psalt->v.type = 1;
goto Bail;
}
fclose(fp);
}
Bail:;
MEM_FREE(copy_mem);
memcpy(ptr, &psalt, sizeof(my_salt*));
return (void*)ptr;
}
static void set_salt(void *salt)
{
saved_salt = *((my_salt**)salt);
memcpy((char*)currentsalt.salt, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode));
currentsalt.length = SALT_LENGTH(saved_salt->v.mode);
currentsalt.iterations = KEYING_ITERATIONS;
currentsalt.outlen = PWD_VER_LENGTH;
currentsalt.skip_bytes = 2 * KEY_LENGTH(saved_salt->v.mode);
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, ¤tsalt, 0, NULL, NULL),
"Copy setting to gpu");
}
#undef set_key
static void set_key(char *key, int index)
{
uint8_t length = strlen(key);
if (length > PLAINTEXT_LENGTH)
length = PLAINTEXT_LENGTH;
inbuffer[index].length = length;
memcpy(inbuffer[index].v, key, length);
}
static char *get_key(int index)
{
static char ret[PLAINTEXT_LENGTH + 1];
uint8_t length = inbuffer[index].length;
memcpy(ret, inbuffer[index].v, length);
ret[length] = '\0';
return ret;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
if (saved_salt->v.type) {
// This salt passed valid() but failed get_salt().
// Should never happen.
memset(crypt_key, 0, count * WINZIP_BINARY_SIZE);
return count;
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]),
"Run kernel");
/// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
"Copy result back");
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
if (!memcmp((unsigned char*)outbuffer[index].v,
saved_salt->passverify, 2)) {
unsigned char pwd_ver[4+64];
pbkdf2_sha1(inbuffer[index].v,
inbuffer[index].length, saved_salt->salt,
SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS,
pwd_ver, KEY_LENGTH(saved_salt->v.mode),
KEY_LENGTH(saved_salt->v.mode));
hmac_sha1(pwd_ver,
KEY_LENGTH(saved_salt->v.mode),
(const unsigned char*)saved_salt->datablob,
saved_salt->comp_len,
crypt_key[index], WINZIP_BINARY_SIZE);
}
else
memset(crypt_key[index], 0, WINZIP_BINARY_SIZE);
}
return count;
}
static int get_hash_0(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & PH_MASK_6; }
static int cmp_all(void *binary, int count)
{
int i;
for (i = 0; i < count; i++)
if (((ARCH_WORD_32*)&(crypt_key[i]))[0] == ((ARCH_WORD_32*)binary)[0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return (((ARCH_WORD_32*)&(crypt_key[index]))[0] == ((ARCH_WORD_32*)binary)[0]);
}
static int cmp_exact(char *source, int index)
{
void *b = winzip_common_binary(source);
return !memcmp(b, crypt_key[index], sizeof(crypt_key[index]));
}
struct fmt_main fmt_opencl_zip = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
WINZIP_BENCHMARK_COMMENT,
WINZIP_BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
WINZIP_BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
{ NULL },
winzip_common_tests
}, {
init,
done,
reset,
fmt_default_prepare,
winzip_common_valid,
fmt_default_split,
winzip_common_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_dyna_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
ellipticSEMFEMSetup.c
|
/*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "elliptic.h"
typedef struct{
dfloat VX;
dfloat VY;
dlong localId;
hlong globalId;
}FEMverts_t;
typedef struct {
dlong localId;
hlong globalId;
int ownerRank;
}parallelNode_t;
// compare on global owners
int parallelCompareOwnersAndGlobalId(const void *a, const void *b);
// compare on global indices
int parallelCompareGlobalId(const void *a, const void *b);
// compare xy coordinates
int parallelCompareFEMvertsLocation(const void *a, const void *b){
dfloat NODETOL = 1e-6;
FEMverts_t *fa = (FEMverts_t*) a;
FEMverts_t *fb = (FEMverts_t*) b;
if(fa->VX < fb->VX - NODETOL) return -1;
if(fa->VX > fb->VX + NODETOL) return +1;
if(fa->VY < fb->VY - NODETOL) return -1;
if(fa->VY > fb->VY + NODETOL) return +1;
return 0;
}
// compare local id
int parallelCompareFEMvertsLocalId(const void *a, const void *b){
FEMverts_t *fa = (FEMverts_t*) a;
FEMverts_t *fb = (FEMverts_t*) b;
if(fa->localId < fb->localId) return -1;
if(fa->localId > fb->localId) return +1;
return 0;
}
int parallelCompareRowColumn(const void *a, const void *b);
void BuildFEMMatrixTri2D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixTet3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixHex3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void ellipticSEMFEMSetup(elliptic_t *elliptic, precon_t* precon, dfloat lambda) {
setupAide options = elliptic->options;
if (!(options.compareArgs("DISCRETIZATION", "CONTINUOUS"))) {
printf("SEMFEM is supported for CONTINUOUS only\n");
MPI_Barrier(elliptic->mesh->comm);
MPI_Finalize();
exit(0);
}
mesh_t* mesh = elliptic->mesh; //original mesh
mesh_t* pmesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //partially assembled fem mesh (result of projecting sem element to larger space)
precon->femMesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //full fem mesh
mesh_t *femMesh = precon->femMesh;
memcpy(pmesh ,mesh,sizeof(mesh_t));
memcpy(femMesh,mesh,sizeof(mesh_t));
if (elliptic->elementType==TRIANGLES) {
//set semfem nodes as the grid points
pmesh->Np = mesh->NpFEM;
pmesh->r = mesh->rFEM;
pmesh->s = mesh->sFEM;
//count number of face nodes in the semfem element
dfloat NODETOL = 1e-6;
pmesh->Nfp=0;
for (int n=0;n<pmesh->Np;n++)
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->Nfp++;
//remake the faceNodes array
pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
int f0=0, f1=0, f2=0;
for (int n=0;n<pmesh->Np;n++) {
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
if (fabs(pmesh->r[n]+pmesh->s[n])<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
}
//remake vertexNodes array
pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
for(int n=0;n<pmesh->Np;++n){
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
pmesh->vertexNodes[0] = n;
if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
pmesh->vertexNodes[1] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)<NODETOL)
pmesh->vertexNodes[2] = n;
}
// connect elements using parallel sort
meshParallelConnect(pmesh);
// compute physical (x,y) locations of the element nodes
meshPhysicalNodesTri2D(pmesh);
// free(sendBuffer);
meshHaloSetup(pmesh);
// connect face nodes (find trace indices)
meshConnectFaceNodes2D(pmesh);
// global nodes
meshParallelConnectNodes(pmesh);
//pmesh->globalIds is now populated
} else if (elliptic->elementType==TETRAHEDRA) {
//set semfem nodes as the grid points
pmesh->Np = mesh->NpFEM;
pmesh->r = mesh->rFEM;
pmesh->s = mesh->sFEM;
pmesh->t = mesh->tFEM;
//count number of face nodes in the semfem element
dfloat NODETOL = 1e-6;
pmesh->Nfp=0;
for (int n=0;n<pmesh->Np;n++)
if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->Nfp++;
//remake the faceNodes array
pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
int f0=0, f1=0, f2=0, f3=0;
for (int n=0;n<pmesh->Np;n++) {
if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
if (fabs(pmesh->r[n]+pmesh->s[n]+
pmesh->t[n]+1.0)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[3*pmesh->Nfp+f3++] = n;
}
//remake vertexNodes array
pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
for(int n=0;n<pmesh->Np;++n){
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[0] = n;
if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[1] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[2] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]-1)*(pmesh->t[n]-1)<NODETOL)
pmesh->vertexNodes[3] = n;
}
// connect elements using parallel sort
meshParallelConnect(pmesh);
    // compute physical (x,y,z) locations of the element nodes
meshPhysicalNodesTet3D(pmesh);
// free(sendBuffer);
meshHaloSetup(pmesh);
// connect face nodes (find trace indices)
meshConnectFaceNodes3D(pmesh);
// global nodes
meshParallelConnectNodes(pmesh);
//pmesh->globalIds is now populated
}
//now build the full degree 1 fem mesh
int femN = 1; //degree of fem approximation
/* allocate space for node coordinates */
femMesh->Nelements = mesh->NelFEM*mesh->Nelements;
femMesh->EToV = (hlong*) calloc(femMesh->Nelements*femMesh->Nverts, sizeof(hlong));
femMesh->EX = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
femMesh->EY = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
if (elliptic->dim==3)
femMesh->EZ = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
dlong *localIds = (dlong *) calloc(femMesh->Nverts*femMesh->Nelements,sizeof(dlong));
// dlong NFEMverts = mesh->Nelements*mesh->NpFEM;
for(dlong e=0;e<mesh->Nelements;++e){
for (int n=0;n<mesh->NelFEM;n++) {
dlong id[femMesh->Nverts];
dlong femId = e*mesh->NelFEM*mesh->Nverts+n*mesh->Nverts;
for (int i=0;i<femMesh->Nverts;i++) {
//local ids in the subelement fem grid
id[i] = e*mesh->NpFEM + mesh->FEMEToV[n*mesh->Nverts+i];
/* read vertex triplet for triangle */
femMesh->EToV[femId+i] = pmesh->globalIds[id[i]];
femMesh->EX[femId+i] = pmesh->x[id[i]];
femMesh->EY[femId+i] = pmesh->y[id[i]];
if (elliptic->dim==3)
femMesh->EZ[femId+i] = pmesh->z[id[i]];
}
switch(elliptic->elementType){
case TRIANGLES:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[2];
break;
case QUADRILATERALS:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2] in a degree 1 element
localIds[femId+3] = id[2];
break;
case TETRAHEDRA:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[2];
localIds[femId+3] = id[3];
break;
case HEXAHEDRA:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2,4,5,7,6] in a degree 1 element
localIds[femId+3] = id[2];
localIds[femId+4] = id[4];
localIds[femId+5] = id[5];
localIds[femId+6] = id[7];
localIds[femId+7] = id[6];
break;
}
}
}
// connect elements using parallel sort
meshParallelConnect(femMesh);
switch(elliptic->elementType){
case TRIANGLES:
meshLoadReferenceNodesTri2D(femMesh, femN);
break;
case QUADRILATERALS:
meshLoadReferenceNodesQuad2D(femMesh, femN);
break;
case TETRAHEDRA:
meshLoadReferenceNodesTet3D(femMesh, femN);
break;
case HEXAHEDRA:
meshLoadReferenceNodesHex3D(femMesh, femN);
break;
}
int *faceFlag = (int*) calloc(pmesh->Np*pmesh->Nfaces,sizeof(int));
for (int f=0;f<pmesh->Nfaces;f++) {
for (int n=0;n<pmesh->Nfp;n++) {
int id = pmesh->faceNodes[f*pmesh->Nfp+n];
faceFlag[f*pmesh->Np + id] = 1; //flag the nodes on this face
}
}
//map from faces of fem sub-elements to the macro element face number
int *femFaceMap = (int*) calloc(mesh->NelFEM*femMesh->Nfaces,sizeof(int));
for (int n=0;n<mesh->NelFEM*femMesh->Nfaces;n++) femFaceMap[n] = -1;
for (int n=0;n<mesh->NelFEM;n++) {
for (int f=0;f<femMesh->Nfaces;f++) {
for (int face=0; face<pmesh->Nfaces;face++) {
//count the nodes on this face which are on a macro face
int NvertsOnFace = 0;
for (int i=0;i<femMesh->Nfp;i++){
int id = femMesh->faceNodes[f*femMesh->Nfp+i];
int v = mesh->FEMEToV[n*pmesh->Nverts+id];
NvertsOnFace += faceFlag[face*pmesh->Np + v];
}
if (NvertsOnFace == femMesh->Nfp)
femFaceMap[n*femMesh->Nfaces+f] = face; //on macro face
}
}
}
//fill the boundary flag array
femMesh->EToB = (int*) calloc(femMesh->Nelements*femMesh->Nfaces, sizeof(int));
for (dlong e=0;e<mesh->Nelements;e++) {
for (int n=0;n<mesh->NelFEM;n++) {
for (int f=0;f<femMesh->Nfaces;f++) {
int face = femFaceMap[n*femMesh->Nfaces+f];
if (face>-1) {
femMesh->EToB[(e*mesh->NelFEM +n)*femMesh->Nfaces +f] = mesh->EToB[e*mesh->Nfaces + face];
}
}
}
}
free(faceFlag);
free(femFaceMap);
switch(elliptic->elementType){
case TRIANGLES:
meshPhysicalNodesTri2D(femMesh);
meshGeometricFactorsTri2D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes2D(femMesh);
meshSurfaceGeometricFactorsTri2D(femMesh);
break;
case QUADRILATERALS:
meshPhysicalNodesQuad2D(femMesh);
meshGeometricFactorsQuad2D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes2D(femMesh);
meshSurfaceGeometricFactorsQuad2D(femMesh);
break;
case TETRAHEDRA:
meshPhysicalNodesTet3D(femMesh);
meshGeometricFactorsTet3D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes3D(femMesh);
meshSurfaceGeometricFactorsTet3D(femMesh);
break;
case HEXAHEDRA:
meshPhysicalNodesHex3D(femMesh);
meshGeometricFactorsHex3D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes3D(femMesh);
meshSurfaceGeometricFactorsHex3D(femMesh);
break;
}
// global nodes
meshParallelConnectNodes(femMesh);
dlong Ntotal = pmesh->Np*pmesh->Nelements;
int verbose = options.compareArgs("VERBOSE","TRUE") ? 1:0;
pmesh->maskedGlobalIds = (hlong *) calloc(Ntotal,sizeof(hlong));
memcpy(pmesh->maskedGlobalIds, pmesh->globalIds, Ntotal*sizeof(hlong));
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//build a new mask for NpFEM>Np node sets
// gather-scatter
pmesh->ogs = ogsSetup(Ntotal, pmesh->globalIds, mesh->comm, 0, verbose, mesh->device);
//make a node-wise bc flag using the gsop (prioritize Dirichlet boundaries over Neumann)
int *mapB = (int *) calloc(Ntotal,sizeof(int));
for (dlong e=0;e<pmesh->Nelements;e++) {
for (int n=0;n<pmesh->Np;n++) mapB[n+e*pmesh->Np] = 1E9;
for (int f=0;f<pmesh->Nfaces;f++) {
int bc = pmesh->EToB[f+e*pmesh->Nfaces];
if (bc>0) {
for (int n=0;n<pmesh->Nfp;n++) {
int BCFlag = elliptic->BCType[bc];
int fid = pmesh->faceNodes[n+f*pmesh->Nfp];
mapB[fid+e*pmesh->Np] = mymin(BCFlag,mapB[fid+e*pmesh->Np]);
}
}
}
}
ogsGatherScatter(mapB, ogsInt, ogsMin, pmesh->ogs);
//use the bc flags to find masked ids
for (dlong n=0;n<pmesh->Nelements*pmesh->Np;n++) {
if (mapB[n] == 1) { //Dirichlet boundary
pmesh->maskedGlobalIds[n] = 0;
}
}
free(mapB);
} else {
//mask using the original mask
for (dlong n=0;n<elliptic->Nmasked;n++)
pmesh->maskedGlobalIds[elliptic->maskIds[n]] = 0;
}
//build masked gs handle
precon->FEMogs = ogsSetup(Ntotal, pmesh->maskedGlobalIds, mesh->comm, 0, verbose, mesh->device);
// number of degrees of freedom on this rank (after gathering)
hlong Ngather = precon->FEMogs->Ngather;
// create a global numbering system
hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong));
int *owner = (int *) calloc(Ngather,sizeof(int));
// every gathered degree of freedom has its own global id
hlong *globalStarts = (hlong *) calloc(mesh->size+1,sizeof(hlong));
MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm);
for(int r=0;r<mesh->size;++r)
globalStarts[r+1] = globalStarts[r]+globalStarts[r+1];
//use the offsets to set a consecutive global numbering
for (dlong n =0;n<precon->FEMogs->Ngather;n++) {
globalIds[n] = n + globalStarts[mesh->rank];
owner[n] = mesh->rank;
}
//scatter this numbering to the original nodes
hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong));
int *globalOwners = (int *) calloc(Ntotal,sizeof(int));
for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1;
ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, precon->FEMogs);
ogsScatter(globalOwners, owner, ogsInt, ogsAdd, precon->FEMogs);
free(globalIds); free(owner);
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//dont need these anymore
free(pmesh->vmapM);
free(pmesh->vmapP);
free(pmesh->mapP);
//maybe more cleanup can go here
}
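  // The reference-element stiffness blocks assembled below are
  // S_ab = D_a^T M D_b for a,b in {r,s} (and t in 3D): the k,l loops
  // contract the differentiation matrices against the mass matrix MM.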
if (elliptic->elementType==TRIANGLES) {
//build stiffness matrices
femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
for (int n=0;n<femMesh->Np;n++) {
for (int m=0;m<femMesh->Np;m++) {
for (int k=0;k<femMesh->Np;k++) {
for (int l=0;l<femMesh->Np;l++) {
femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
}
}
}
}
} else if (elliptic->elementType==TETRAHEDRA) {
//build stiffness matrices
femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sst = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Str = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sts = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Stt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
for (int n=0;n<femMesh->Np;n++) {
for (int m=0;m<femMesh->Np;m++) {
for (int k=0;k<femMesh->Np;k++) {
for (int l=0;l<femMesh->Np;l++) {
femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Srt[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Sst[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
femMesh->Str[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sts[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Stt[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
}
}
}
}
}
  if (mesh->rank==0) { printf("Building full SEMFEM matrix..."); fflush(stdout); }
// Build non-zeros of stiffness matrix (unassembled)
dlong nnzLocal = femMesh->Np*femMesh->Np*femMesh->Nelements;
dlong cnt =0;
nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t));
int *AsendCounts = (int*) calloc(mesh->size, sizeof(int));
int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int));
int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int));
int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int));
//Build unassembled non-zeros
switch(elliptic->elementType){
case TRIANGLES:
BuildFEMMatrixTri2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
case QUADRILATERALS:
BuildFEMMatrixQuad2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
case TETRAHEDRA:
BuildFEMMatrixTet3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
case HEXAHEDRA:
BuildFEMMatrixHex3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
}
// Make the MPI_NONZERO_T data type
MPI_Datatype MPI_NONZERO_T;
MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT};
int blength[4] = {1, 1, 1, 1};
MPI_Aint addr[4], displ[4];
MPI_Get_address ( &(sendNonZeros[0] ), addr+0);
MPI_Get_address ( &(sendNonZeros[0].col ), addr+1);
MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2);
MPI_Get_address ( &(sendNonZeros[0].val ), addr+3);
displ[0] = 0;
displ[1] = addr[1] - addr[0];
displ[2] = addr[2] - addr[0];
displ[3] = addr[3] - addr[0];
MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T);
MPI_Type_commit (&MPI_NONZERO_T);
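// Building the datatype from measured addresses and displacements (rather
// than assuming a packed layout) keeps MPI_NONZERO_T valid even if the
// compiler inserts padding between the nonZero_t members.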
// count how many non-zeros to send to each process
for(dlong n=0;n<cnt;++n)
AsendCounts[sendNonZeros[n].ownerRank]++;
// sort by row ordering
qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn);
// find how many nodes to expect (should use sparse version)
MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm);
// find send and recv offsets for gather
dlong nnz = 0;
for(int r=0;r<mesh->size;++r){
AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r];
ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r];
nnz += ArecvCounts[r];
}
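// e.g. with 2 ranks and AsendCounts = {1,3}, AsendOffsets becomes {0,1,4};
// the same prefix sum over ArecvCounts yields the receive offsets used by
// MPI_Alltoallv below.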
nonZero_t *A = (nonZero_t*) calloc(nnz, sizeof(nonZero_t));
// determine number to receive
MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T,
A, ArecvCounts, ArecvOffsets, MPI_NONZERO_T,
mesh->comm);
// sort received non-zero entries by row block (may need to switch compareRowColumn tests)
qsort(A, nnz, sizeof(nonZero_t), parallelCompareRowColumn);
// compress duplicates
cnt = 0;
for(dlong n=1;n<nnz;++n){
if(A[n].row == A[cnt].row && A[n].col == A[cnt].col){
A[cnt].val += A[n].val;
} else{
++cnt;
A[cnt] = A[n];
}
}
if (nnz) cnt++;
nnz = cnt;
if(mesh->rank==0) printf("done.\n");
MPI_Barrier(mesh->comm);
MPI_Type_free(&MPI_NONZERO_T);
hlong *Rows = (hlong *) calloc(nnz, sizeof(hlong));
hlong *Cols = (hlong *) calloc(nnz, sizeof(hlong));
dfloat *Vals = (dfloat*) calloc(nnz,sizeof(dfloat));
for (dlong n=0;n<nnz;n++) {
Rows[n] = A[n].row;
Cols[n] = A[n].col;
Vals[n] = A[n].val;
}
free(A);
precon->parAlmond = parAlmond::Init(mesh->device, mesh->comm, options);
parAlmond::AMGSetup(precon->parAlmond,
globalStarts,
nnz,
Rows,
Cols,
Vals,
elliptic->allNeumann,
elliptic->allNeumannPenalty);
free(Rows); free(Cols); free(Vals);
if (options.compareArgs("VERBOSE", "TRUE"))
parAlmond::Report(precon->parAlmond);
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
// //tell parAlmond not to gather this level (its done manually)
// agmgLevel *baseLevel = precon->parAlmond->levels[0];
// baseLevel->gatherLevel = false;
// baseLevel->weightedInnerProds = false;
// build interp and anterp
dfloat *SEMFEMAnterp = (dfloat*) calloc(mesh->NpFEM*mesh->Np, sizeof(dfloat));
for(int n=0;n<mesh->NpFEM;++n){
for(int m=0;m<mesh->Np;++m){
SEMFEMAnterp[n+m*mesh->NpFEM] = mesh->SEMFEMInterp[n*mesh->Np+m];
}
}
mesh->o_SEMFEMInterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),mesh->SEMFEMInterp);
mesh->o_SEMFEMAnterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),SEMFEMAnterp);
free(SEMFEMAnterp);
precon->o_rFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
precon->o_zFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
precon->o_GrFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
precon->o_GzFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
} else {
// //tell parAlmond to gather this level
// agmgLevel *baseLevel = precon->parAlmond->levels[0];
// baseLevel->gatherLevel = true;
parAlmond::multigridLevel *baseLevel = precon->parAlmond->levels[0];
precon->rhsG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
precon->xG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
precon->o_rhsG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));
precon->o_xG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));
// baseLevel->Srhs = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
// baseLevel->Sx = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
// baseLevel->o_Srhs = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
// baseLevel->o_Sx = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
// baseLevel->weightedInnerProds = false;
// baseLevel->gatherArgs = (void **) calloc(3,sizeof(void*));
// baseLevel->gatherArgs[0] = (void *) elliptic;
// baseLevel->gatherArgs[1] = (void *) precon->FEMogs; //use the gs made from the partial gathered femgrid
// baseLevel->gatherArgs[2] = (void *) &(baseLevel->o_Sx);
// baseLevel->scatterArgs = baseLevel->gatherArgs;
// baseLevel->device_gather = ellipticGather;
// baseLevel->device_scatter = ellipticScatter;
}
}
void BuildFEMMatrixTri2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
for (int n=0;n<femMesh->Np;n++) {
dlong idn = localIds[e*femMesh->Np + n];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int m=0;m<femMesh->Np;m++) {
dlong idm = localIds[e*femMesh->Np + m];
if (globalNumbering[idm]<0) continue; //skip masked nodes
dfloat val = 0.;
dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
dfloat J = femMesh->ggeo[e*femMesh->Nggeo + GWJID];
val += Grr*femMesh->Srr[m+n*femMesh->Np];
val += Grs*femMesh->Srs[m+n*femMesh->Np];
val += Grs*femMesh->Ssr[m+n*femMesh->Np];
val += Gss*femMesh->Sss[m+n*femMesh->Np];
val += J*lambda*femMesh->MM[m+n*femMesh->Np];
dfloat nonZeroThreshold = 1e-7;
if (fabs(val)>nonZeroThreshold) {
#pragma omp critical
{
// pack non-zero
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
for (int ny=0;ny<femMesh->Nq;ny++) {
for (int nx=0;nx<femMesh->Nq;nx++) {
dlong idn = localIds[e*femMesh->Np + nx+ny*femMesh->Nq];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int my=0;my<femMesh->Nq;my++) {
for (int mx=0;mx<femMesh->Nq;mx++) {
dlong idm = localIds[e*femMesh->Np + mx+my*femMesh->Nq];
if (globalNumbering[idm]<0) continue; //skip masked nodes
int id;
dfloat val = 0.;
if (ny==my) {
for (int k=0;k<femMesh->Nq;k++) {
id = k+ny*femMesh->Nq;
dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
}
}
id = mx+ny*femMesh->Nq;
dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];
id = nx+my*femMesh->Nq;
dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
if (nx==mx) {
for (int k=0;k<femMesh->Nq;k++) {
id = nx+k*femMesh->Nq;
dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
}
}
if ((nx==mx)&&(ny==my)) {
id = nx + ny*femMesh->Nq;
dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
val += JW*lambda;
}
dfloat nonZeroThreshold = 1e-7;
if (fabs(val)>nonZeroThreshold) {
#pragma omp critical
{
// pack non-zero
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
}
}
void BuildFEMMatrixTet3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
dfloat Grt = femMesh->ggeo[e*femMesh->Nggeo + G02ID];
dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
dfloat Gst = femMesh->ggeo[e*femMesh->Nggeo + G12ID];
dfloat Gtt = femMesh->ggeo[e*femMesh->Nggeo + G22ID];
dfloat J = femMesh->ggeo[e*femMesh->Nggeo + GWJID];
for (int n=0;n<femMesh->Np;n++) {
dlong idn = localIds[e*femMesh->Np + n];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int m=0;m<femMesh->Np;m++) {
dlong idm = localIds[e*femMesh->Np + m];
if (globalNumbering[idm]<0) continue; //skip masked nodes
dfloat val = 0.;
val += Grr*femMesh->Srr[m+n*femMesh->Np];
val += Grs*femMesh->Srs[m+n*femMesh->Np];
val += Grt*femMesh->Srt[m+n*femMesh->Np];
val += Grs*femMesh->Ssr[m+n*femMesh->Np];
val += Gss*femMesh->Sss[m+n*femMesh->Np];
val += Gst*femMesh->Sst[m+n*femMesh->Np];
val += Grt*femMesh->Str[m+n*femMesh->Np];
val += Gst*femMesh->Sts[m+n*femMesh->Np];
val += Gtt*femMesh->Stt[m+n*femMesh->Np];
val += J*lambda*femMesh->MM[m+n*femMesh->Np];
dfloat nonZeroThreshold = 1e-7;
if (fabs(val)>nonZeroThreshold) {
#pragma omp critical
{
// pack non-zero
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
void BuildFEMMatrixHex3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
for (int nz=0;nz<femMesh->Nq;nz++) {
for (int ny=0;ny<femMesh->Nq;ny++) {
for (int nx=0;nx<femMesh->Nq;nx++) {
dlong nn = nx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dlong idn = localIds[e*femMesh->Np + nn];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int mz=0;mz<femMesh->Nq;mz++) {
for (int my=0;my<femMesh->Nq;my++) {
for (int mx=0;mx<femMesh->Nq;mx++) {
dlong mm = mx+my*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
dlong idm = localIds[e*femMesh->Np + mm];
if (globalNumbering[idm]<0) continue; //skip masked nodes
int id;
dfloat val = 0.;
if ((ny==my)&&(nz==mz)) {
for (int k=0;k<femMesh->Nq;k++) {
id = k+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
}
}
if (nz==mz) {
id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];
id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
}
if (ny==my) {
id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Grt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
val += Grt*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];
id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
val += Gst*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
}
if ((nx==mx)&&(nz==mz)) {
for (int k=0;k<femMesh->Nq;k++) {
id = nx+k*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
}
}
if (nx==mx) {
id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
val += Gst*femMesh->D[ny+my*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];
id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
dfloat Gts = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
val += Gts*femMesh->D[my+ny*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
}
if ((nx==mx)&&(ny==my)) {
for (int k=0;k<femMesh->Nq;k++) {
id = nx+ny*femMesh->Nq+k*femMesh->Nq*femMesh->Nq;
dfloat Gtt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G22ID*femMesh->Np];
val += Gtt*femMesh->D[nz+k*femMesh->Nq]*femMesh->D[mz+k*femMesh->Nq];
}
}
if ((nx==mx)&&(ny==my)&&(nz==mz)) {
id = nx + ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
val += JW*lambda;
}
// pack non-zero
dfloat nonZeroThreshold = 1e-7;
if (fabs(val)>nonZeroThreshold) {
#pragma omp critical
{
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
}
}
}
}
|
IndexedFaceMesh.h
|
#ifndef __INDEXEDFACEMESH_H__
#define __INDEXEDFACEMESH_H__
#include <vector>
#include "Common/Common.h"
#include <iterator>
namespace Utilities
{
class IndexedFaceMesh
{
public:
struct Edge
{
unsigned int m_face[2];
unsigned int m_vert[2];
};
struct Face
{
unsigned int *m_edges;
};
// Stores the indices of each face connected to a specific vertex
struct VertexFaces
{
VertexFaces()
{
m_fIndices = 0;
m_numFaces = 0;
}
VertexFaces(VertexFaces const& other)
{
m_fIndices = 0; // initialize before delegating so operator= can safely delete[]
m_numFaces = 0;
*this = other;
}
VertexFaces& operator=(VertexFaces const& other)
{
if (this == &other) return *this;
m_numFaces = other.m_numFaces;
delete[] m_fIndices; // release any previously owned storage to avoid a leak
m_fIndices = new unsigned int[m_numFaces];
#if defined(WIN32) || defined(_WIN32) || defined(WIN64)
std::copy(other.m_fIndices, other.m_fIndices + m_numFaces,
stdext::unchecked_array_iterator<unsigned int*>(m_fIndices));
#else
std::copy(other.m_fIndices, other.m_fIndices + m_numFaces, m_fIndices);
#endif
return *this;
}
~VertexFaces()
{
delete[] m_fIndices;
}
unsigned int m_numFaces;
unsigned int* m_fIndices;
};
// Stores the indices of each edge connected to a specific vertex
struct VertexEdges
{
VertexEdges()
{
m_eIndices = 0;
m_numEdges = 0;
}
VertexEdges(VertexEdges const& other)
{
m_eIndices = 0; // initialize before delegating so operator= can safely delete[]
m_numEdges = 0;
*this = other;
}
VertexEdges& operator=(VertexEdges const& other)
{
if (this == &other) return *this;
m_numEdges = other.m_numEdges;
delete[] m_eIndices; // release any previously owned storage to avoid a leak
m_eIndices = new unsigned int[m_numEdges];
#if defined(WIN32) || defined(_WIN32) || defined(WIN64)
std::copy(other.m_eIndices, other.m_eIndices + m_numEdges,
stdext::unchecked_array_iterator<unsigned int*>(m_eIndices));
#else
std::copy(other.m_eIndices, other.m_eIndices + m_numEdges, m_eIndices);
#endif
return *this;
}
~VertexEdges()
{
delete[] m_eIndices;
}
unsigned int m_numEdges;
unsigned int* m_eIndices;
};
public:
typedef std::vector<unsigned int> Faces;
typedef std::vector<Vector3r, Alloc_Vector3r> FaceNormals;
typedef std::vector<Vector3r, Alloc_Vector3r> VertexNormals;
typedef std::vector<Face> FaceData;
typedef std::vector<Edge> Edges;
typedef std::vector<VertexFaces> VerticesFaces;
typedef std::vector<VertexEdges> VerticesEdges;
typedef std::vector<unsigned int> UVIndices;
typedef std::vector<Vector2r, Alloc_Vector2r> UVs;
protected:
unsigned int m_numPoints;
Faces m_indices;
Edges m_edges;
FaceData m_faces;
bool m_closed;
UVIndices m_uvIndices;
UVs m_uvs;
VerticesFaces m_verticesFaces;
VerticesEdges m_verticesEdges;
unsigned int m_verticesPerFace;
FaceNormals m_normals;
VertexNormals m_vertexNormals;
public:
IndexedFaceMesh(const unsigned int verticesPerFace = 3);
IndexedFaceMesh(IndexedFaceMesh const& other);
IndexedFaceMesh& operator=(IndexedFaceMesh const& other);
~IndexedFaceMesh();
void release();
bool isClosed() const;
void initMesh(const unsigned int nPoints, const unsigned int nEdges, const unsigned int nFaces);
void addFace(const unsigned int * const indices);
void addFace(const int * const indices);
void addUV(const Real u, const Real v);
void addUVIndex(const unsigned int index);
const Faces& getFaces() const { return m_indices; }
Faces& getFaces(){ return m_indices; }
const FaceNormals& getFaceNormals() const { return m_normals; }
FaceNormals& getFaceNormals(){ return m_normals; }
const VertexNormals& getVertexNormals() const { return m_vertexNormals; }
VertexNormals& getVertexNormals(){ return m_vertexNormals; }
Edges& getEdges() { return m_edges; }
const Edges& getEdges() const { return m_edges; }
const FaceData& getFaceData() const { return m_faces; }
const UVIndices& getUVIndices() const { return m_uvIndices; }
const UVs& getUVs() const { return m_uvs; }
const VerticesFaces& getVertexFaces() const { return m_verticesFaces; }
const VerticesEdges& getVertexEdges() const { return m_verticesEdges; }
unsigned int numVertices() const { return m_numPoints; }
unsigned int numFaces() const { return (unsigned int)m_indices.size() / m_verticesPerFace; }
unsigned int numEdges() const { return (unsigned int)m_edges.size(); }
unsigned int numUVs() const { return (unsigned int)m_uvs.size(); }
void copyUVs(const UVIndices& uvIndices, const UVs& uvs);
void buildNeighbors();
template<class PositionData>
void updateNormals(const PositionData &pd, const unsigned int offset);
template<class PositionData>
void updateVertexNormals(const PositionData &pd);
unsigned int getVerticesPerFace() const;
};
template<class PositionData>
void IndexedFaceMesh::updateNormals(const PositionData &pd, const unsigned int offset)
{
m_normals.resize(numFaces());
#pragma omp parallel default(shared)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int) numFaces(); i++)
{
// Get first three points of face
const Vector3r &a = pd.getPosition(m_indices[m_verticesPerFace*i] + offset);
const Vector3r &b = pd.getPosition(m_indices[m_verticesPerFace*i + 1] + offset);
const Vector3r &c = pd.getPosition(m_indices[m_verticesPerFace*i + 2] + offset);
// Create normal
Vector3r v1 = b - a;
Vector3r v2 = c - a;
m_normals[i] = v1.cross(v2);
m_normals[i].normalize();
}
}
}
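// Usage sketch (PositionData is any type exposing getPosition(unsigned int)
// returning a Vector3r; the names below are illustrative only):
// mesh.updateNormals(particles, 0); // unit face normals
// mesh.updateVertexNormals(particles); // averaged per-vertex normals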
template<class PositionData>
void IndexedFaceMesh::updateVertexNormals(const PositionData &pd)
{
m_vertexNormals.resize(numVertices());
for (unsigned int i = 0; i < numVertices(); i++)
{
m_vertexNormals[i].setZero();
}
for (unsigned int i = 0u; i < numFaces(); i++)
{
const Vector3r &n = m_normals[i];
m_vertexNormals[m_indices[m_verticesPerFace*i]] += n;
m_vertexNormals[m_indices[m_verticesPerFace*i + 1]] += n;
m_vertexNormals[m_indices[m_verticesPerFace*i + 2]] += n;
}
for (unsigned int i = 0; i < numVertices(); i++)
{
m_vertexNormals[i].normalize();
}
}
}
#endif
|
1590.c
|
/* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <[email protected]>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following is an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
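/* After the centering and scaling below, data[i][j] holds
(x_ij - mean_j) / (sqrt(float_n) * stddev_j), so each accumulated inner
product symmat[j1][j2] = sum_i data[i][j1]*data[i][j2] is the Pearson
correlation coefficient cov(j1,j2) / (stddev_j1 * stddev_j2), with
float_n standing in for the sample count. */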
/* Center and reduce the column vectors. */
for (i = 0; i < _PB_N; i++)
{
#pragma omp target teams distribute thread_limit(256)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
}
/* Calculate the m * m correlation matrix. */
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
GB_subref_phase0.c
|
//------------------------------------------------------------------------------
// GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_subref.h"
#define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen)
//------------------------------------------------------------------------------
// GB_find_Ap_start_end
//------------------------------------------------------------------------------
// Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector
// A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense
// vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then
// pA and pA_end are set to -1 to denote an empty list. The resulting pointers
// are then returned in Ap_start [kC] and Ap_end [kC].
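// Example: if Ai [pA:pA_end-1] = [0 3 5 9] for A(:,kA), then imin = 2 and
// imax = 6 trim the range to the entries [3 5]: pA advances past the 0 and
// pA_end retreats past the 9.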
static inline void GB_find_Ap_start_end
(
// input, not modified
const int64_t kA,
const int64_t *restrict Ap,
const int64_t *restrict Ai,
const int64_t avlen,
const int64_t imin,
const int64_t imax,
const int64_t kC,
const int64_t nzombies,
// output: Ap_start [kC] and Ap_end [kC]:
int64_t *restrict Ap_start,
int64_t *restrict Ap_end
)
{
//--------------------------------------------------------------------------
// get A(:,kA)
//--------------------------------------------------------------------------
int64_t pA = GBP (Ap, kA, avlen) ;
int64_t pA_end = GBP (Ap, kA+1, avlen) ;
int64_t ajnz = pA_end - pA ;
//--------------------------------------------------------------------------
// trim it to A(imin:imax,kA)
//--------------------------------------------------------------------------
if (ajnz == avlen)
{
//----------------------------------------------------------------------
// A (:,kA) is dense; use pA and pA_end as-is
//----------------------------------------------------------------------
;
}
else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin)
{
//----------------------------------------------------------------------
// intersection of A(:,kA) and imin:imax is empty
//----------------------------------------------------------------------
pA = -1 ;
pA_end = -1 ;
}
else
{
//----------------------------------------------------------------------
// A (:,kA) is sparse, with at least one entry
//----------------------------------------------------------------------
// trim the leading part of A(:,kA)
if (GB_Ai (pA) < imin)
{
bool found, is_zombie ;
int64_t pright = pA_end - 1 ;
GB_SPLIT_BINARY_SEARCH_ZOMBIE (imin, Ai,
pA, pright, found, nzombies, is_zombie) ;
}
// trim the trailing part of A (:,kA)
if (imin == imax)
{
if (GB_Ai (pA) == imin)
{
// found the single entry A (i,kA)
pA_end = pA + 1 ;
}
else
{
// A (i,kA) has not been found
pA = -1 ;
pA_end = -1 ;
}
}
else if (imax < GB_Ai (pA_end-1))
{
bool found, is_zombie ;
int64_t pleft = pA ;
int64_t pright = pA_end - 1 ;
GB_SPLIT_BINARY_SEARCH_ZOMBIE (imax, Ai,
pleft, pright, found, nzombies, is_zombie) ;
pA_end = (found) ? (pleft + 1) : pleft ;
}
#ifdef GB_DEBUG
ajnz = pA_end - pA ;
if (ajnz > 0 && Ap != NULL)
{
// A(imin:imax,kA) is now in Ai [pA:pA_end-1]
ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ;
ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ;
ASSERT (imin <= GB_Ai (pA)) ;
ASSERT (GB_Ai (pA_end-1) <= imax) ;
}
#endif
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// The result [pA:pA_end-1] defines the range of entries that need to be
// accessed for constructing C(:,kC).
Ap_start [kC] = pA ;
Ap_end [kC] = pA_end ;
}
//------------------------------------------------------------------------------
// GB_subref_phase0
//------------------------------------------------------------------------------
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Count, int64_t) ; \
}
GrB_Info GB_subref_phase0
(
// output
int64_t *restrict *p_Ch, // Ch = C->h hyperlist, or NULL standard
size_t *p_Ch_size,
int64_t *restrict *p_Ap_start, // A(:,kA) starts at Ap_start [kC]
size_t *p_Ap_start_size,
int64_t *restrict *p_Ap_end, // ... and ends at Ap_end [kC] - 1
size_t *p_Ap_end_size,
int64_t *p_Cnvec, // # of vectors in C
bool *p_need_qsort, // true if C must be sorted
int *p_Ikind, // kind of I
int64_t *p_nI, // length of I
int64_t Icolon [3], // for GB_RANGE, GB_STRIDE
int64_t *p_nJ, // length of J
// input, not modified
const GrB_Matrix A,
const GrB_Index *I, // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t ni, // length of I, or special
const GrB_Index *J, // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t nj, // length of J, or special
// const bool must_sort, // true if C must be returned sorted
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A for subref phase 0", GB0) ;
ASSERT (!GB_IS_BITMAP (A)) ; // GB_bitmap_subref is used instead
ASSERT (p_Ch != NULL) ;
ASSERT (p_Ap_start != NULL) ;
ASSERT (p_Ap_end != NULL) ;
ASSERT (p_Cnvec != NULL) ;
ASSERT (p_nJ != NULL) ;
ASSERT (p_Ikind != NULL) ;
ASSERT (p_nI != NULL) ;
ASSERT (Icolon != NULL) ;
ASSERT (I != NULL) ;
ASSERT (J != NULL) ;
GrB_Info info ;
(*p_Ch ) = NULL ;
(*p_Ap_start ) = NULL ;
(*p_Ap_end ) = NULL ;
(*p_Cnvec ) = 0 ;
(*p_need_qsort) = false ;
(*p_Ikind ) = 0 ;
(*p_nI ) = 0 ;
(*p_nJ ) = 0 ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
int64_t *restrict Ap = A->p ; // Ap (but not A->p) may be trimmed
int64_t *restrict Ah = A->h ; // Ah (but not A->h) may be trimmed
int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ; // may be trimmed
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t nzombies = A->nzombies ;
//--------------------------------------------------------------------------
// check the properties of I and J
//--------------------------------------------------------------------------
// C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1
int64_t nI, nJ, Jcolon [3] ;
int Ikind, Jkind ;
GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ;
GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ;
bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ;
int64_t imin, imax, jmin, jmax ;
info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon,
&I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ;
if (info != GrB_SUCCESS)
{
// I invalid or out of memory
return (info) ;
}
info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon,
&J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context) ;
if (info != GrB_SUCCESS)
{
// J invalid or out of memory
return (info) ;
}
bool need_qsort = I_unsorted ;
//--------------------------------------------------------------------------
// determine if C is empty
//--------------------------------------------------------------------------
bool C_empty = (nI == 0 || nJ == 0) ;
//--------------------------------------------------------------------------
// trim the hyperlist of A
//--------------------------------------------------------------------------
// Ah, Ap, and anvec are modified to include just the vectors in range
// jmin:jmax, inclusive. A itself is not modified, just the Ah and Ap
// pointers, and the scalar anvec. If J is ":", then jmin is zero and
// jmax is avdim-1, so there is nothing to trim from Ah. If C is empty,
// then Ah and Ap will not be accessed at all, so this can be skipped.
bool A_is_hyper = (Ah != NULL) ;
if (A_is_hyper && !C_empty)
{
//----------------------------------------------------------------------
// trim the leading end of Ah so that it starts with jmin:...
//----------------------------------------------------------------------
if (jmin > 0)
{
bool found ;
int64_t kleft = 0 ;
int64_t kright = anvec-1 ;
GB_SPLIT_BINARY_SEARCH (jmin, Ah, kleft, kright, found) ;
Ah += kleft ;
Ap += kleft ;
anvec -= kleft ;
}
//----------------------------------------------------------------------
// trim the trailing end of Ah so that it ends with ..:jmax
//----------------------------------------------------------------------
if (jmax < avdim-1)
{
bool found ;
int64_t kleft = 0 ;
int64_t kright = anvec-1 ;
GB_SPLIT_BINARY_SEARCH (jmax, Ah, kleft, kright, found) ;
anvec = (found) ? (kleft + 1) : kleft ;
}
// Ah has been trimmed
ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax));
}
// Ah may now be empty, after being trimmed
C_empty = C_empty || (anvec == 0) ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
#define NTASKS_PER_THREAD 8
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1, ntasks = 1 ;
int ntasks_max = nthreads_max * NTASKS_PER_THREAD ;
#define GB_GET_NTHREADS_AND_NTASKS(work) \
{ \
nthreads = GB_nthreads (work, chunk, nthreads_max) ; \
ntasks = (nthreads == 1) ? 1 : (NTASKS_PER_THREAD * nthreads) ; \
ntasks = GB_IMIN (ntasks, work) ; \
ntasks = GB_IMAX (ntasks, 1) ; \
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Count, int64_t) ;
GB_WERK_PUSH (Count, ntasks_max+1, int64_t) ;
if (Count == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute Cnvec and determine the format of Ch
//--------------------------------------------------------------------------
// Ch is an explicit or implicit array of size Cnvec <= nJ. jC = Ch [kC]
// where C(:,jC) is the (kC)th vector of C. If NULL, then C is standard, and
// jC == kC. jC is in the range 0 to nJ-1.
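// Example: if A is hypersparse with trimmed Ah = [5 7] and J is the
// range 5:9 (so jmin = 5), then Ch = [0 2] and Cnvec = 2.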
int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
int64_t *restrict Ap_start = NULL ; size_t Ap_start_size = 0 ;
int64_t *restrict Ap_end = NULL ; size_t Ap_end_size = 0 ;
int64_t Cnvec = 0 ;
int64_t jbegin = Jcolon [GxB_BEGIN] ;
int64_t jinc = Jcolon [GxB_INC ] ;
if (C_empty)
{
//----------------------------------------------------------------------
// C is an empty hypersparse matrix
//----------------------------------------------------------------------
;
}
else if (!A_is_hyper)
{
//----------------------------------------------------------------------
// both C and A are standard matrices
//----------------------------------------------------------------------
Cnvec = nJ ;
GB_GET_NTHREADS_AND_NTASKS (nJ) ;
}
else if (Jkind == GB_ALL || Jkind == GB_RANGE)
{
//----------------------------------------------------------------------
// J is ":" or jbegin:jend
//----------------------------------------------------------------------
// Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec.
// so kA = kC, and jC = Ch [kC] = jA - jmin. Ap has also been trimmed.
Cnvec = anvec ;
ASSERT (Cnvec <= nJ) ;
GB_GET_NTHREADS_AND_NTASKS (anvec) ;
}
else if (Jkind == GB_STRIDE && anvec < nJ * 64)
{
//----------------------------------------------------------------------
// J is jbegin:jinc:jend, and J is long relative to Ah
//----------------------------------------------------------------------
// The case for Jkind == GB_STRIDE can be done by either this method,
// or the one below. This takes O(anvec) time, and the one below
// takes O(nj*log2(anvec)), so use this method if anvec < nj * 64.
// Ch is a list of length Cnvec, where Cnvec is the length of
// the intersection of Ah and jbegin:jinc:jend.
// count the length of Ch
Cnvec = 0 ;
GB_GET_NTHREADS_AND_NTASKS (anvec) ;
// scan all of Ah and check each entry if it appears in J
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end, my_Cnvec = 0 ;
GB_PARTITION (kA_start, kA_end, anvec,
(jinc > 0) ? tid : (ntasks-tid-1), ntasks) ;
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
my_Cnvec++ ;
}
}
Count [tid] = my_Cnvec ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
Cnvec = Count [ntasks] ;
}
else // Jkind == GB_LIST or GB_STRIDE
{
//----------------------------------------------------------------------
// J is an explicit list, or jbegin:jinc:jend
//----------------------------------------------------------------------
// Ch is an explicit list: the intersection of Ah and J
// count the length of Ch
Cnvec = 0 ;
GB_GET_NTHREADS_AND_NTASKS (nJ) ;
// scan all of J and check each entry if it appears in Ah
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jC_start, jC_end, my_Cnvec = 0 ;
GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
for (int64_t jC = jC_start ; jC < jC_end ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
bool found ;
int64_t kA = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
if (found) my_Cnvec++ ;
}
Count [tid] = my_Cnvec ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
Cnvec = Count [ntasks] ;
}
//--------------------------------------------------------------------------
// allocate Ch, Ap_start, and Ap_end
//--------------------------------------------------------------------------
C_empty = C_empty || (Cnvec == 0) ;
// C is hypersparse if A is hypersparse, or if C is empty
bool C_is_hyper = A_is_hyper || C_empty ;
if (C_is_hyper)
{
Ch = GB_MALLOC (Cnvec, int64_t, &Ch_size) ;
if (Ch == NULL)
{
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
if (Cnvec > 0)
{
Ap_start = GB_MALLOC_WORK (Cnvec, int64_t, &Ap_start_size) ;
Ap_end = GB_MALLOC_WORK (Cnvec, int64_t, &Ap_end_size) ;
if (Ap_start == NULL || Ap_end == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
GB_FREE (&Ch, Ch_size) ;
GB_FREE_WORK (&Ap_start, Ap_start_size) ;
GB_FREE_WORK (&Ap_end, Ap_end_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// create Ch, Ap_start, and Ap_end
//--------------------------------------------------------------------------
// For the (kC)th vector of C, which corresponds to the (kA)th vector of A,
// pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range
// of entries in A(imin:imax,kA).
if (C_empty)
{
//----------------------------------------------------------------------
// C is an empty hypersparse matrix
//----------------------------------------------------------------------
;
}
else if (!A_is_hyper)
{
//----------------------------------------------------------------------
// both C and A are standard matrices
//----------------------------------------------------------------------
int64_t jC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (jC = 0 ; jC < nJ ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax,
jC, nzombies, Ap_start, Ap_end) ;
}
}
else if (Jkind == GB_ALL || Jkind == GB_RANGE)
{
//----------------------------------------------------------------------
// J is ":" or jbegin:jend
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is a shifted copy of the trimmed
// Ah, of length Cnvec = anvec. so kA = kC. Ap has also been trimmed.
int64_t kC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kC = 0 ; kC < Cnvec ; kC++)
{
int64_t kA = kC ;
int64_t jA = Ah [kA] ;
int64_t jC = jA - jmin ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
}
}
else if (Jkind == GB_STRIDE && anvec < nJ * 64)
{
//----------------------------------------------------------------------
// J is jbegin:jinc:jend where jinc may be positive or negative
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is constructed by scanning all
// vectors in Ah [0..anvec-1] and checking if they appear in the
// jbegin:jinc:jend sequence.
if (jinc > 0)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end ;
GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
int64_t jC = (jA - jbegin) / jinc ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
else
{
int tid;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end ;
GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
int64_t jC = (jA - jbegin) / jinc ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
}
else // Jkind == GB_LIST or GB_STRIDE
{
//----------------------------------------------------------------------
// J is an explicit list, or jbegin:jinc:jend
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is constructed by scanning the
// list J, or the entire jbegin:jinc:jend sequence. Each vector is
// then found in Ah, via binary search.
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jC_start, jC_end ;
GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t jC = jC_start ; jC < jC_end ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
bool found ;
int64_t kA = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
if (found)
{
ASSERT (jA == Ah [kA]) ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
//--------------------------------------------------------------------------
// check result
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
for (int64_t kC = 0 ; kC < Cnvec ; kC++)
{
// jC is the (kC)th vector of C = A(I,J)
int64_t jC = GBH (Ch, kC) ;
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
// jA is the corresponding (kA)th vector of A.
int64_t kA = 0 ;
int64_t pright = A->nvec - 1 ;
int64_t pA_start_all, pA_end_all ;
bool found = GB_lookup (A->h != NULL, A->h, A->p, A->vlen, &kA,
pright, jA, &pA_start_all, &pA_end_all) ;
if (found && A->h != NULL)
{
ASSERT (jA == A->h [kA]) ;
}
int64_t pA = Ap_start [kC] ;
int64_t pA_end = Ap_end [kC] ;
int64_t ajnz = pA_end - pA ;
if (ajnz == avlen)
{
// A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector.
// C(:,kC) will have exactly nI entries.
ASSERT (pA == pA_start_all) ;
ASSERT (pA_end == pA_end_all ) ;
;
}
else if (ajnz > 0)
{
// A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1]
ASSERT (imin <= GB_Ai (pA)) ;
ASSERT (GB_Ai (pA_end-1) <= imax) ;
ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ;
}
else
{
// A(imin:imax,kA) and C(:,kC) are empty
;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
(*p_Ch ) = Ch ; (*p_Ch_size) = Ch_size ;
(*p_Ap_start ) = Ap_start ; (*p_Ap_start_size) = Ap_start_size ;
(*p_Ap_end ) = Ap_end ; (*p_Ap_end_size) = Ap_end_size ;
(*p_Cnvec ) = Cnvec ;
(*p_need_qsort) = need_qsort ;
(*p_Ikind ) = Ikind ;
(*p_nI ) = nI ;
(*p_nJ ) = nJ ;
return (GrB_SUCCESS) ;
}
|
BCAssem.c
|
/*
* BCAssem.c
*
* Created on: Oct 6, 2014
* Author: lurker
*/
#include "mex.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef SINGLE
#define REAL float
#else /* not SINGLE */
#define REAL double
#endif /* not SINGLE */
/*
*
* Boundary Condition Assembly.
*
* Do integral of test function and trial function on boundary.
*
* Input : Edges
*
* Output: [I, J, V] for building sparse matrix
*
*/
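/*
 * For a straight boundary edge of length L with linear (P1) shape
 * functions, the edge mass matrix is
 *
 *   (L/6) * [ 2  1 ]
 *           [ 1  2 ]
 *
 * i.e. L/3 on the diagonal and L/6 off the diagonal, which is what the
 * assembly loop below emits for each edge.
 */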
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
REAL *np = mxGetPr(prhs[0]);
REAL *ne = mxGetPr(prhs[1]);
REAL *nodes = mxGetPr(prhs[2]);
int *edges = (int *)mxGetData(prhs[3]);
/* each boundary node takes 2 edges.*/
int nzmax = 4*(int)*ne;
plhs[0] = mxCreateNumericMatrix(nzmax, 1, mxDOUBLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericMatrix(nzmax, 1, mxDOUBLE_CLASS, mxREAL);
plhs[2] = mxCreateNumericMatrix(nzmax, 1, mxDOUBLE_CLASS, mxREAL);
REAL *pI = mxGetPr(plhs[0]);
REAL *pJ = mxGetPr(plhs[1]);
REAL *pV = mxGetPr(plhs[2]);
int nE = (int)(*ne);
int i;
/* Index the outputs by edge number rather than through shared running
 * pointers: the original *pI++ / *pJ++ / *pV++ pattern is a data race
 * under the parallel for. */
#pragma omp parallel for
for (i = 0; i < nE; i++)
{
mwSize node_1 = edges[2*i];
mwSize node_2 = edges[2*i + 1];
REAL length = sqrt(pow(nodes[2*node_1] - nodes[2*node_2],2) + pow(nodes[2*node_1 + 1] - nodes[2*node_2 + 1], 2));
int j, k;
for (j = 0; j < 2; j++)
{
for (k = 0; k < 2; k++)
{
int idx = 4*i + 2*j + k;
pI[idx] = edges[2*i + j] + 1;
pJ[idx] = edges[2*i + k] + 1;
/* linear (P1) edge mass matrix: L/3 on the diagonal, L/6 off it */
pV[idx] = (j == k) ? length/3.0 : length/6.0;
}
}
}
}
|
convolutiondepthwise_3x3_pack4.h
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw3x3s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 128);
__builtin_prefetch(r1 + 128);
__builtin_prefetch(r2 + 128);
__builtin_prefetch(r3 + 128);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _sum10 = _bias0;
v4f32 _sum11 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r01);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r03);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r11);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r13);
_sum10 = __msa_fmadd_w(_sum10, _k00, _r10);
_sum10 = __msa_fmadd_w(_sum10, _k01, _r11);
_sum10 = __msa_fmadd_w(_sum10, _k02, _r12);
_sum11 = __msa_fmadd_w(_sum11, _k00, _r11);
_sum11 = __msa_fmadd_w(_sum11, _k01, _r12);
_sum11 = __msa_fmadd_w(_sum11, _k02, _r13);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r21);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r23);
_sum10 = __msa_fmadd_w(_sum10, _k10, _r20);
_sum10 = __msa_fmadd_w(_sum10, _k11, _r21);
_sum10 = __msa_fmadd_w(_sum10, _k12, _r22);
_sum11 = __msa_fmadd_w(_sum11, _k10, _r21);
_sum11 = __msa_fmadd_w(_sum11, _k11, _r22);
_sum11 = __msa_fmadd_w(_sum11, _k12, _r23);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
_sum10 = __msa_fmadd_w(_sum10, _k20, _r30);
_sum10 = __msa_fmadd_w(_sum10, _k21, _r31);
_sum10 = __msa_fmadd_w(_sum10, _k22, _r32);
_sum11 = __msa_fmadd_w(_sum11, _k20, _r31);
_sum11 = __msa_fmadd_w(_sum11, _k21, _r32);
_sum11 = __msa_fmadd_w(_sum11, _k22, _r33);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum10, outptr1, 0);
__msa_st_w((v4i32)_sum11, outptr1 + 4, 0);
outptr0 += 4 * 2;
outptr1 += 4 * 2;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
r3 += 4 * 2;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
__builtin_prefetch(r3 + 96);
v4f32 _sum0 = _bias0;
v4f32 _sum1 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
_sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
_sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
_sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
_sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
_sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
_sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
_sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
_sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
_sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr1, 0);
outptr0 += 4;
outptr1 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
}
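// two output rows were produced from input rows i..i+3; advance past the
// 2-pixel kernel overlap (w = outw + 2) plus one full row so the next
// iteration starts at input row i+2, and skip the output row that
// outptr1 wrote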
r0 += 2 * 4 + w * 4;
r1 += 2 * 4 + w * 4;
r2 += 2 * 4 + w * 4;
r3 += 2 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 128);
__builtin_prefetch(r1 + 128);
__builtin_prefetch(r2 + 128);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r01);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r03);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r11);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r13);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r21);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r23);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
}
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
}
}
static void convdw3x3s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
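// tailstep = (w - 2*outw)*4 + w*4: finish the leftover of the current
// input row after reading 2*outw pixels (stride 2), then skip the row
// the stride jumps over; x4 for the pack4 layout.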
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__builtin_prefetch(r0 + 160);
__builtin_prefetch(r1 + 160);
__builtin_prefetch(r2 + 160);
v4f32 _sum00 = _bias0;
v4f32 _sum01 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k00, _r00);
_sum00 = __msa_fmadd_w(_sum00, _k01, _r01);
_sum00 = __msa_fmadd_w(_sum00, _k02, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k00, _r02);
_sum01 = __msa_fmadd_w(_sum01, _k01, _r03);
_sum01 = __msa_fmadd_w(_sum01, _k02, _r04);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k10, _r10);
_sum00 = __msa_fmadd_w(_sum00, _k11, _r11);
_sum00 = __msa_fmadd_w(_sum00, _k12, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k10, _r12);
_sum01 = __msa_fmadd_w(_sum01, _k11, _r13);
_sum01 = __msa_fmadd_w(_sum01, _k12, _r14);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);
_sum00 = __msa_fmadd_w(_sum00, _k20, _r20);
_sum00 = __msa_fmadd_w(_sum00, _k21, _r21);
_sum00 = __msa_fmadd_w(_sum00, _k22, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k20, _r22);
_sum01 = __msa_fmadd_w(_sum01, _k21, _r23);
_sum01 = __msa_fmadd_w(_sum01, _k22, _r24);
__msa_st_w((v4i32)_sum00, outptr0, 0);
__msa_st_w((v4i32)_sum01, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
}
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(r1 + 96);
__builtin_prefetch(r2 + 96);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
vector.h
|
#ifndef VECTOR_H
#define VECTOR_H
#include <omp.h>
#include <sys/time.h>
#include "matrix.h"
ull vrows(ull r, // Matrix rows
ull c, // Matrix cols
ul threads)
{
timeval start, end;
    matrix* vA = alloc(1, c), // Input vector: one entry per column of A
          * A = alloc(r, c),
          * vB = alloc(1, r); // Result: one entry per row of A
fill(vA);
fill(A);
/**
* Data split by rows
*/
gettimeofday(&start, NULL);
{
ull i = 0, j = 0;
#pragma omp parallel private(i, j) shared(vB) num_threads(threads)
{
#pragma omp for
iterate(, i, A->rows) {
iterate(, j, A->cols) { vB(i) += vA(j) * A(i, j); }
}
}
}
gettimeofday(&end, NULL);
#ifdef WRITE
printf("\tResult matrix is written to `vector.txt`\n");
write(vB, "vector.txt");
#endif
dealloc(vA);
dealloc(A);
dealloc(vB);
return ELAPSED;
}
ull vcols(ull r, ull c, ul threads)
{
timeval start, end;
    matrix* vA = alloc(1, c), // Input vector: one entry per column of A
          * A = alloc(r, c),
          * vB = alloc(1, r); // Result: one entry per row of A
fill(vA);
fill(A);
/**
* Data split by columns
*/
gettimeofday(&start, NULL);
{
ull i = 0, j = 0;
#pragma omp parallel private(i, j) shared(vB) num_threads(threads)
{
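        /* manual reduction: each thread keeps a private partial dot
           product over its share of columns (the omp for below), then
           merges it into vB(i) in the critical section once per row */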
T dot = 0;
iterate(, i, A->rows) {
#pragma omp for
iterate(, j, A->cols) { dot += vA(j) * A(i, j); }
#pragma omp critical
{
vB(i) += dot;
dot = 0;
}
}
}
}
gettimeofday(&end, NULL);
#ifdef WRITE
write(vB, "vector.txt");
#endif
dealloc(vA);
dealloc(A);
dealloc(vB);
return ELAPSED;
}
ull vblocks(ull r, ull c, ul threads)
{
timeval start, end;
    matrix* vA = alloc(1, c), // Input vector: one entry per column of A
          * A = alloc(r, c),
          * vB = alloc(1, r); // Result: one entry per row of A
fill(vA);
fill(A);
/**
* Data split by blocks
*/
gettimeofday(&start, NULL);
#pragma omp parallel shared(vB) num_threads(threads)
{
ull lt = omp_get_num_threads(),
bv = lt, // Vertical blocks
bh = lt; // Horizontal blocks
#pragma omp for collapse(2)
iterate(ull, iv, lt) {
iterate(ull, ih, lt) {
for (ull i = iv * A->rows / bv; i < (iv + 1) * A->rows / bv; ++i) {
for (ull j = ih * A->cols / bh; j < (ih + 1) * A->cols / bh; ++j) {
#pragma omp atomic
          vB(i) += A(i, j) * vA(j);
}
}
}
}
}
gettimeofday(&end, NULL);
#ifdef WRITE
write(vB, "vector.txt");
#endif
dealloc(vA);
dealloc(A);
dealloc(vB);
return ELAPSED;
}
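/**
 * Hedged usage sketch (not part of the original header): run all three
 * splits once and print the raw ELAPSED values. Assumes matrix.h provides
 * the ull/ul typedefs and that stdio.h is available through it, as the
 * WRITE branches above already assume.
 */
static inline void vbench(ull r, ull c, ul threads)
{
  printf("rows:   %llu\n", (unsigned long long)vrows(r, c, threads));
  printf("cols:   %llu\n", (unsigned long long)vcols(r, c, threads));
  printf("blocks: %llu\n", (unsigned long long)vblocks(r, c, threads));
}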
#endif // VECTOR_H
|
GB_binop__ne_int32.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int32)
// A*D function (colscale): GB (_AxD__ne_int32)
// D*A function (rowscale): GB (_DxB__ne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int32)
// C=scalar+B GB (_bind1st__ne_int32)
// C=scalar+B' GB (_bind1st_tran__ne_int32)
// C=A+scalar GB (_bind2nd__ne_int32)
// C=A'+scalar GB (_bind2nd_tran__ne_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT32 || GxB_NO_NE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fc_compute.h
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/operators/jit/kernels.h"
#include "paddle/fluid/operators/math/blas.h"
namespace paddle {
namespace operators {
namespace math {
template <typename DeviceContext, typename T>
inline void FCCompute(const BlasT<DeviceContext, T>& blas, const int M,
const int N, const int K, const T* X, const T* W, T* Y,
const T* B = NULL, bool relu = false) {
blas.MatMul(M, N, K, X, W, Y);
if (B == NULL) {
return;
}
if (relu) {
auto compute =
jit::Get<jit::kVAddRelu, jit::XYZNTuples<T>, platform::CPUPlace>(N);
for (int i = 0; i < M; i++) {
T* dst = Y + i * N;
compute(B, dst, dst, N);
}
} else {
auto compute =
jit::Get<jit::kVAdd, jit::XYZNTuples<T>, platform::CPUPlace>(N);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < M; i++) {
T* dst = Y + i * N;
compute(B, dst, dst, N);
}
}
}
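// Hedged usage sketch (illustrative, not part of this header): with a BLAS
// wrapper obtained by the caller, e.g. via
// math::GetBlas<platform::CPUDeviceContext, T>(ctx),
//
//   FCCompute<platform::CPUDeviceContext, float>(blas, M, N, K, x, w, y, b,
//                                                /*relu=*/true);
//
// computes y = relu(x * w + b) row by row for an M x K input and K x N
// weight, with the bias add and relu fused in a single jit kernel per row.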
} // namespace math
} // namespace operators
} // namespace paddle
|
pol.c
|
/*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "balance.h"
#include "elec.h"
#include "private.h"
#define POL_SCF_TOL 1.0e-10
#define POL_SCF_MAX_ITER 80
double efp_get_pol_damp_tt(double, double, double);
enum efp_result efp_compute_id_direct(struct efp *);
struct id_work_data {
double conv;
vec_t *id_new;
vec_t *id_conj_new;
};
double
efp_get_pol_damp_tt(double r, double pa, double pb)
{
double ab = sqrt(pa * pb);
double r2 = r * r;
return 1.0 - exp(-ab * r2) * (1.0 + ab * r2);
}
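/* limits: f(0) = 0, so the interaction is fully damped at contact, and
 * f -> 1 as r -> infinity, recovering the undamped 1/r^3 field at long
 * range */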
static double
efp_get_pol_damp_tt_grad(double r, double pa, double pb)
{
double ab = sqrt(pa * pb);
double r2 = r * r;
return -2.0 * exp(-ab * r2) * (ab * ab * r2);
}
static vec_t
get_multipole_field(const vec_t *xyz, const struct multipole_pt *mult_pt,
const struct swf *swf)
{
vec_t field = vec_zero;
vec_t dr = {
xyz->x - mult_pt->x - swf->cell.x,
xyz->y - mult_pt->y - swf->cell.y,
xyz->z - mult_pt->z - swf->cell.z
};
double t1, t2;
double r = vec_len(&dr);
double r3 = r * r * r;
double r5 = r3 * r * r;
double r7 = r5 * r * r;
/* charge */
field.x += swf->swf * mult_pt->monopole * dr.x / r3;
field.y += swf->swf * mult_pt->monopole * dr.y / r3;
field.z += swf->swf * mult_pt->monopole * dr.z / r3;
/* dipole */
t1 = vec_dot(&mult_pt->dipole, &dr);
field.x += swf->swf * (3.0 / r5 * t1 * dr.x - mult_pt->dipole.x / r3);
field.y += swf->swf * (3.0 / r5 * t1 * dr.y - mult_pt->dipole.y / r3);
field.z += swf->swf * (3.0 / r5 * t1 * dr.z - mult_pt->dipole.z / r3);
/* quadrupole */
t1 = quadrupole_sum(mult_pt->quadrupole, &dr);
t2 = mult_pt->quadrupole[quad_idx(0, 0)] * dr.x +
mult_pt->quadrupole[quad_idx(1, 0)] * dr.y +
mult_pt->quadrupole[quad_idx(2, 0)] * dr.z;
field.x += swf->swf * (-2.0 / r5 * t2 + 5.0 / r7 * t1 * dr.x);
t2 = mult_pt->quadrupole[quad_idx(0, 1)] * dr.x +
mult_pt->quadrupole[quad_idx(1, 1)] * dr.y +
mult_pt->quadrupole[quad_idx(2, 1)] * dr.z;
field.y += swf->swf * (-2.0 / r5 * t2 + 5.0 / r7 * t1 * dr.y);
t2 = mult_pt->quadrupole[quad_idx(0, 2)] * dr.x +
mult_pt->quadrupole[quad_idx(1, 2)] * dr.y +
mult_pt->quadrupole[quad_idx(2, 2)] * dr.z;
field.z += swf->swf * (-2.0 / r5 * t2 + 5.0 / r7 * t1 * dr.z);
/* octupole-polarizability interactions are ignored */
return field;
}
static vec_t
get_elec_field(const struct efp *efp, size_t frag_idx, size_t pt_idx)
{
const struct frag *fr_j = efp->frags + frag_idx;
const struct polarizable_pt *pt = fr_j->polarizable_pts + pt_idx;
vec_t elec_field = vec_zero;
for (size_t i = 0; i < efp->n_frag; i++) {
if (i == frag_idx || efp_skip_frag_pair(efp, i, frag_idx))
continue;
const struct frag *fr_i = efp->frags + i;
struct swf swf = efp_make_swf(efp, fr_i, fr_j);
/* field due to nuclei */
for (size_t j = 0; j < fr_i->n_atoms; j++) {
const struct efp_atom *at = fr_i->atoms + j;
vec_t dr = {
pt->x - at->x - swf.cell.x,
pt->y - at->y - swf.cell.y,
pt->z - at->z - swf.cell.z
};
double r = vec_len(&dr);
double r3 = r * r * r;
double p1 = 1.0;
if (efp->opts.pol_damp == EFP_POL_DAMP_TT) {
p1 = efp_get_pol_damp_tt(r, fr_i->pol_damp,
fr_j->pol_damp);
}
elec_field.x += swf.swf * at->znuc * dr.x / r3 * p1;
elec_field.y += swf.swf * at->znuc * dr.y / r3 * p1;
elec_field.z += swf.swf * at->znuc * dr.z / r3 * p1;
}
/* field due to multipoles */
for (size_t j = 0; j < fr_i->n_multipole_pts; j++) {
const struct multipole_pt *mult_pt =
fr_i->multipole_pts + j;
vec_t mult_field = get_multipole_field(CVEC(pt->x),
mult_pt, &swf);
vec_t dr = {
pt->x - mult_pt->x - swf.cell.x,
pt->y - mult_pt->y - swf.cell.y,
pt->z - mult_pt->z - swf.cell.z
};
double r = vec_len(&dr);
double p1 = 1.0;
if (efp->opts.pol_damp == EFP_POL_DAMP_TT) {
p1 = efp_get_pol_damp_tt(r, fr_i->pol_damp,
fr_j->pol_damp);
}
elec_field.x += mult_field.x * p1;
elec_field.y += mult_field.y * p1;
elec_field.z += mult_field.z * p1;
}
}
if (efp->opts.terms & EFP_TERM_AI_POL) {
/* field due to nuclei from ab initio subsystem */
for (size_t i = 0; i < efp->n_ptc; i++) {
vec_t dr = vec_sub(CVEC(pt->x), efp->ptc_xyz + i);
double r = vec_len(&dr);
double r3 = r * r * r;
elec_field.x += efp->ptc[i] * dr.x / r3;
elec_field.y += efp->ptc[i] * dr.y / r3;
elec_field.z += efp->ptc[i] * dr.z / r3;
}
}
return elec_field;
}
static enum efp_result
add_electron_density_field(struct efp *efp)
{
enum efp_result res;
vec_t *xyz, *field;
if (efp->get_electron_density_field == NULL)
return EFP_RESULT_SUCCESS;
xyz = (vec_t *)malloc(efp->n_polarizable_pts * sizeof(vec_t));
field = (vec_t *)malloc(efp->n_polarizable_pts * sizeof(vec_t));
for (size_t i = 0, idx = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++, idx++) {
struct polarizable_pt *pt = frag->polarizable_pts + j;
xyz[idx].x = pt->x;
xyz[idx].y = pt->y;
xyz[idx].z = pt->z;
}
}
if ((res = efp->get_electron_density_field(efp->n_polarizable_pts,
(const double *)xyz, (double *)field,
efp->get_electron_density_field_user_data)))
goto error;
for (size_t i = 0, idx = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++, idx++) {
struct polarizable_pt *pt = frag->polarizable_pts + j;
pt->elec_field_wf = field[idx];
}
}
error:
free(xyz);
free(field);
return res;
}
static void
compute_elec_field_range(struct efp *efp, size_t from, size_t to, void *data)
{
vec_t *elec_field = (vec_t *)data;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
for (size_t i = from; i < to; i++) {
const struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++) {
elec_field[frag->polarizable_offset + j] =
get_elec_field(efp, i, j);
}
}
}
static enum efp_result
compute_elec_field(struct efp *efp)
{
vec_t *elec_field;
enum efp_result res;
elec_field = (vec_t *)calloc(efp->n_polarizable_pts, sizeof(vec_t));
efp_balance_work(efp, compute_elec_field_range, elec_field);
efp_allreduce((double *)elec_field, 3 * efp->n_polarizable_pts);
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
for (size_t i = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++) {
frag->polarizable_pts[j].elec_field =
elec_field[frag->polarizable_offset + j];
frag->polarizable_pts[j].elec_field_wf = vec_zero;
}
}
free(elec_field);
if (efp->opts.terms & EFP_TERM_AI_POL)
if ((res = add_electron_density_field(efp)))
return res;
return EFP_RESULT_SUCCESS;
}
static void
get_induced_dipole_field(struct efp *efp, size_t frag_idx,
struct polarizable_pt *pt, vec_t *field, vec_t *field_conj)
{
struct frag *fr_i = efp->frags + frag_idx;
*field = vec_zero;
*field_conj = vec_zero;
for (size_t j = 0; j < efp->n_frag; j++) {
if (j == frag_idx || efp_skip_frag_pair(efp, frag_idx, j))
continue;
struct frag *fr_j = efp->frags + j;
struct swf swf = efp_make_swf(efp, fr_i, fr_j);
for (size_t jj = 0; jj < fr_j->n_polarizable_pts; jj++) {
struct polarizable_pt *pt_j = fr_j->polarizable_pts+jj;
size_t idx = fr_j->polarizable_offset+jj;
vec_t dr = {
pt->x - pt_j->x + swf.cell.x,
pt->y - pt_j->y + swf.cell.y,
pt->z - pt_j->z + swf.cell.z
};
double r = vec_len(&dr);
double r3 = r * r * r;
double r5 = r3 * r * r;
double t1 = vec_dot(&efp->indip[idx], &dr);
double t2 = vec_dot(&efp->indipconj[idx], &dr);
double p1 = 1.0;
if (efp->opts.pol_damp == EFP_POL_DAMP_TT) {
p1 = efp_get_pol_damp_tt(r, fr_i->pol_damp,
fr_j->pol_damp);
}
field->x -= swf.swf * p1 * (efp->indip[idx].x / r3 -
3.0 * t1 * dr.x / r5);
field->y -= swf.swf * p1 * (efp->indip[idx].y / r3 -
3.0 * t1 * dr.y / r5);
field->z -= swf.swf * p1 * (efp->indip[idx].z / r3 -
3.0 * t1 * dr.z / r5);
field_conj->x -= swf.swf * p1 *
(efp->indipconj[idx].x / r3 - 3.0 * t2 * dr.x / r5);
field_conj->y -= swf.swf * p1 *
(efp->indipconj[idx].y / r3 - 3.0 * t2 * dr.y / r5);
field_conj->z -= swf.swf * p1 *
(efp->indipconj[idx].z / r3 - 3.0 * t2 * dr.z / r5);
}
}
}
static void
compute_id_range(struct efp *efp, size_t from, size_t to, void *data)
{
double conv = 0.0;
vec_t *id_new, *id_conj_new;
id_new = ((struct id_work_data *)data)->id_new;
id_conj_new = ((struct id_work_data *)data)->id_conj_new;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:conv)
#endif
for (size_t i = from; i < to; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++) {
struct polarizable_pt *pt = frag->polarizable_pts + j;
size_t idx = frag->polarizable_offset + j;
vec_t field, field_conj;
/* electric field from other induced dipoles */
get_induced_dipole_field(efp, i, pt, &field,
&field_conj);
/* add field that doesn't change during scf */
field.x += pt->elec_field.x + pt->elec_field_wf.x;
field.y += pt->elec_field.y + pt->elec_field_wf.y;
field.z += pt->elec_field.z + pt->elec_field_wf.z;
field_conj.x += pt->elec_field.x + pt->elec_field_wf.x;
field_conj.y += pt->elec_field.y + pt->elec_field_wf.y;
field_conj.z += pt->elec_field.z + pt->elec_field_wf.z;
id_new[idx] = mat_vec(&pt->tensor, &field);
id_conj_new[idx] = mat_trans_vec(&pt->tensor,
&field_conj);
conv += vec_dist(&id_new[idx], &efp->indip[idx]);
conv += vec_dist(&id_conj_new[idx],
&efp->indipconj[idx]);
}
}
((struct id_work_data *)data)->conv += conv;
}
static double
pol_scf_iter(struct efp *efp)
{
struct id_work_data data;
size_t npts = efp->n_polarizable_pts;
data.conv = 0.0;
data.id_new = (vec_t *)calloc(npts, sizeof(vec_t));
data.id_conj_new = (vec_t *)calloc(npts, sizeof(vec_t));
efp_balance_work(efp, compute_id_range, &data);
efp_allreduce((double *)data.id_new, 3 * npts);
efp_allreduce((double *)data.id_conj_new, 3 * npts);
efp_allreduce(&data.conv, 1);
memcpy(efp->indip, data.id_new, npts * sizeof(vec_t));
memcpy(efp->indipconj, data.id_conj_new, npts * sizeof(vec_t));
free(data.id_new);
free(data.id_conj_new);
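	/* report the average change per induced dipole (two dipole sets) */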
return data.conv / npts / 2;
}
static void
compute_energy_range(struct efp *efp, size_t from, size_t to, void *data)
{
double energy = 0.0;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:energy)
#endif
for (size_t i = from; i < to; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++) {
struct polarizable_pt *pt = frag->polarizable_pts + j;
size_t idx = frag->polarizable_offset + j;
energy += 0.5 * vec_dot(&efp->indipconj[idx],
&pt->elec_field_wf) -
0.5 * vec_dot(&efp->indip[idx],
&pt->elec_field);
}
}
*(double *)data += energy;
}
static enum efp_result
efp_compute_id_iterative(struct efp *efp)
{
memset(efp->indip, 0, efp->n_polarizable_pts * sizeof(vec_t));
memset(efp->indipconj, 0, efp->n_polarizable_pts * sizeof(vec_t));
for (size_t iter = 1; iter <= POL_SCF_MAX_ITER; iter++) {
if (pol_scf_iter(efp) < POL_SCF_TOL)
break;
if (iter == POL_SCF_MAX_ITER)
return EFP_RESULT_POL_NOT_CONVERGED;
}
return EFP_RESULT_SUCCESS;
}
enum efp_result
efp_compute_pol_energy(struct efp *efp, double *energy)
{
enum efp_result res;
assert(energy);
if ((res = compute_elec_field(efp)))
return res;
switch (efp->opts.pol_driver) {
case EFP_POL_DRIVER_ITERATIVE:
res = efp_compute_id_iterative(efp);
break;
case EFP_POL_DRIVER_DIRECT:
res = efp_compute_id_direct(efp);
break;
}
if (res)
return res;
*energy = 0.0;
efp_balance_work(efp, compute_energy_range, energy);
efp_allreduce(energy, 1);
return EFP_RESULT_SUCCESS;
}
static void
compute_grad_point(struct efp *efp, size_t frag_idx, size_t pt_idx)
{
const struct frag *fr_i = efp->frags + frag_idx;
const struct polarizable_pt *pt_i = fr_i->polarizable_pts + pt_idx;
size_t idx_i = fr_i->polarizable_offset + pt_idx;
vec_t force, add_i, add_j, force_, add_i_, add_j_;
double e;
vec_t dipole_i = {
0.5 * (efp->indip[idx_i].x + efp->indipconj[idx_i].x),
0.5 * (efp->indip[idx_i].y + efp->indipconj[idx_i].y),
0.5 * (efp->indip[idx_i].z + efp->indipconj[idx_i].z)
};
for (size_t j = 0; j < efp->n_frag; j++) {
if (j == frag_idx || efp_skip_frag_pair(efp, frag_idx, j))
continue;
struct frag *fr_j = efp->frags + j;
struct swf swf = efp_make_swf(efp, fr_i, fr_j);
/* energy without switching applied */
double energy = 0.0;
/* induced dipole - nuclei */
for (size_t k = 0; k < fr_j->n_atoms; k++) {
struct efp_atom *at_j = fr_j->atoms + k;
vec_t dr = {
at_j->x - pt_i->x - swf.cell.x,
at_j->y - pt_i->y - swf.cell.y,
at_j->z - pt_i->z - swf.cell.z
};
double p1 = 1.0, p2 = 0.0;
if (efp->opts.pol_damp == EFP_POL_DAMP_TT) {
double r = vec_len(&dr);
p1 = efp_get_pol_damp_tt(r, fr_i->pol_damp,
fr_j->pol_damp);
p2 = efp_get_pol_damp_tt_grad(r, fr_i->pol_damp,
fr_j->pol_damp);
}
e = -efp_charge_dipole_energy(at_j->znuc,
&dipole_i, &dr);
efp_charge_dipole_grad(at_j->znuc, &dipole_i, &dr,
&force, &add_j, &add_i);
vec_negate(&force);
vec_scale(&force, p1);
vec_scale(&add_i, p1);
vec_scale(&add_j, p1);
force.x += p2 * e * dr.x;
force.y += p2 * e * dr.y;
force.z += p2 * e * dr.z;
vec_scale(&force, swf.swf);
vec_scale(&add_i, swf.swf);
vec_scale(&add_j, swf.swf);
efp_add_force(efp->grad + frag_idx, CVEC(fr_i->x),
CVEC(pt_i->x), &force, &add_i);
efp_sub_force(efp->grad + j, CVEC(fr_j->x),
CVEC(at_j->x), &force, &add_j);
efp_add_stress(&swf.dr, &force, &efp->stress);
energy += p1 * e;
}
/* induced dipole - multipoles */
for (size_t k = 0; k < fr_j->n_multipole_pts; k++) {
struct multipole_pt *pt_j = fr_j->multipole_pts + k;
vec_t dr = {
pt_j->x - pt_i->x - swf.cell.x,
pt_j->y - pt_i->y - swf.cell.y,
pt_j->z - pt_i->z - swf.cell.z
};
double p1 = 1.0, p2 = 0.0;
if (efp->opts.pol_damp == EFP_POL_DAMP_TT) {
double r = vec_len(&dr);
p1 = efp_get_pol_damp_tt(r, fr_i->pol_damp,
fr_j->pol_damp);
p2 = efp_get_pol_damp_tt_grad(r, fr_i->pol_damp,
fr_j->pol_damp);
}
force = vec_zero;
add_i = vec_zero;
add_j = vec_zero;
/* induced dipole - charge */
e = -efp_charge_dipole_energy(pt_j->monopole,
&dipole_i, &dr);
efp_charge_dipole_grad(pt_j->monopole, &dipole_i, &dr,
&force_, &add_j_, &add_i_);
vec_negate(&force_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
/* induced dipole - dipole */
e += efp_dipole_dipole_energy(&dipole_i,
&pt_j->dipole, &dr);
efp_dipole_dipole_grad(&dipole_i, &pt_j->dipole, &dr,
&force_, &add_i_, &add_j_);
vec_negate(&add_j_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
/* induced dipole - quadrupole */
e += efp_dipole_quadrupole_energy(&dipole_i,
pt_j->quadrupole, &dr);
efp_dipole_quadrupole_grad(&dipole_i, pt_j->quadrupole,
&dr, &force_, &add_i_, &add_j_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
/* induced dipole - octupole interactions are ignored */
vec_scale(&force, p1);
vec_scale(&add_i, p1);
vec_scale(&add_j, p1);
force.x += p2 * e * dr.x;
force.y += p2 * e * dr.y;
force.z += p2 * e * dr.z;
vec_scale(&force, swf.swf);
vec_scale(&add_i, swf.swf);
vec_scale(&add_j, swf.swf);
efp_add_force(efp->grad + frag_idx, CVEC(fr_i->x),
CVEC(pt_i->x), &force, &add_i);
efp_sub_force(efp->grad + j, CVEC(fr_j->x),
CVEC(pt_j->x), &force, &add_j);
efp_add_stress(&swf.dr, &force, &efp->stress);
energy += p1 * e;
}
/* induced dipole - induced dipoles */
for (size_t jj = 0; jj < fr_j->n_polarizable_pts; jj++) {
struct polarizable_pt *pt_j = fr_j->polarizable_pts+jj;
size_t idx_j = fr_j->polarizable_offset+jj;
vec_t dr = {
pt_j->x - pt_i->x - swf.cell.x,
pt_j->y - pt_i->y - swf.cell.y,
pt_j->z - pt_i->z - swf.cell.z
};
vec_t half_dipole_i = {
0.5 * efp->indip[idx_i].x,
0.5 * efp->indip[idx_i].y,
0.5 * efp->indip[idx_i].z
};
double p1 = 1.0, p2 = 0.0;
if (efp->opts.pol_damp == EFP_POL_DAMP_TT) {
double r = vec_len(&dr);
p1 = efp_get_pol_damp_tt(r, fr_i->pol_damp,
fr_j->pol_damp);
p2 = efp_get_pol_damp_tt_grad(r, fr_i->pol_damp,
fr_j->pol_damp);
}
e = efp_dipole_dipole_energy(&half_dipole_i,
&efp->indipconj[idx_j], &dr);
efp_dipole_dipole_grad(&half_dipole_i,
&efp->indipconj[idx_j], &dr, &force,
&add_i, &add_j);
vec_negate(&add_j);
vec_scale(&force, p1);
vec_scale(&add_i, p1);
vec_scale(&add_j, p1);
force.x += p2 * e * dr.x;
force.y += p2 * e * dr.y;
force.z += p2 * e * dr.z;
vec_scale(&force, swf.swf);
vec_scale(&add_i, swf.swf);
vec_scale(&add_j, swf.swf);
efp_add_force(efp->grad + frag_idx, CVEC(fr_i->x),
CVEC(pt_i->x), &force, &add_i);
efp_sub_force(efp->grad + j, CVEC(fr_j->x),
CVEC(pt_j->x), &force, &add_j);
efp_add_stress(&swf.dr, &force, &efp->stress);
energy += p1 * e;
}
force.x = swf.dswf.x * energy;
force.y = swf.dswf.y * energy;
force.z = swf.dswf.z * energy;
six_atomic_add_xyz(efp->grad + frag_idx, &force);
six_atomic_sub_xyz(efp->grad + j, &force);
efp_add_stress(&swf.dr, &force, &efp->stress);
}
/* induced dipole - ab initio nuclei */
if (efp->opts.terms & EFP_TERM_AI_POL) {
for (size_t j = 0; j < efp->n_ptc; j++) {
vec_t dr = vec_sub(efp->ptc_xyz + j, CVEC(pt_i->x));
efp_charge_dipole_grad(efp->ptc[j], &dipole_i, &dr,
&force, &add_j, &add_i);
vec_negate(&add_i);
vec_atomic_add(efp->ptc_grad + j, &force);
efp_sub_force(efp->grad + frag_idx, CVEC(fr_i->x),
CVEC(pt_i->x), &force, &add_i);
}
}
}
static void
compute_grad_range(struct efp *efp, size_t from, size_t to, void *data)
{
(void)data;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
for (size_t i = from; i < to; i++)
for (size_t j = 0; j < efp->frags[i].n_polarizable_pts; j++)
compute_grad_point(efp, i, j);
}
enum efp_result
efp_compute_pol(struct efp *efp)
{
enum efp_result res;
if (!(efp->opts.terms & EFP_TERM_POL) &&
!(efp->opts.terms & EFP_TERM_AI_POL))
return EFP_RESULT_SUCCESS;
if ((res = efp_compute_pol_energy(efp, &efp->energy.polarization)))
return res;
if (efp->do_gradient)
efp_balance_work(efp, compute_grad_range, NULL);
return EFP_RESULT_SUCCESS;
}
void
efp_update_pol(struct frag *frag)
{
for (size_t i = 0; i < frag->n_polarizable_pts; i++) {
efp_move_pt(CVEC(frag->x), &frag->rotmat,
CVEC(frag->lib->polarizable_pts[i].x),
VEC(frag->polarizable_pts[i].x));
const mat_t *in = &frag->lib->polarizable_pts[i].tensor;
mat_t *out = &frag->polarizable_pts[i].tensor;
efp_rotate_t2(&frag->rotmat, (const double *)in, (double *)out);
}
}
EFP_EXPORT enum efp_result
efp_get_electric_field(struct efp *efp, size_t frag_idx, const double *xyz,
double *field)
{
assert(efp);
assert(frag_idx < efp->n_frag);
assert(xyz);
assert(field);
const struct frag *frag = efp->frags + frag_idx;
vec_t elec_field = vec_zero;
for (size_t i = 0; i < efp->n_frag; i++) {
if (i == frag_idx || efp_skip_frag_pair(efp, i, frag_idx))
continue;
const struct frag *fr_i = efp->frags + i;
struct swf swf = efp_make_swf(efp, fr_i, frag);
/* field due to nuclei */
for (size_t j = 0; j < fr_i->n_atoms; j++) {
const struct efp_atom *at = fr_i->atoms + j;
vec_t dr = {
xyz[0] - at->x - swf.cell.x,
xyz[1] - at->y - swf.cell.y,
xyz[2] - at->z - swf.cell.z
};
double r = vec_len(&dr);
double r3 = r * r * r;
elec_field.x += swf.swf * at->znuc * dr.x / r3;
elec_field.y += swf.swf * at->znuc * dr.y / r3;
elec_field.z += swf.swf * at->znuc * dr.z / r3;
}
/* field due to multipoles */
for (size_t j = 0; j < fr_i->n_multipole_pts; j++) {
const struct multipole_pt *mpt = fr_i->multipole_pts+j;
vec_t mult_field = get_multipole_field(
(const vec_t *)xyz, mpt, &swf);
elec_field.x += mult_field.x;
elec_field.y += mult_field.y;
elec_field.z += mult_field.z;
}
/* field due to induced dipoles */
for (size_t j = 0; j < fr_i->n_polarizable_pts; j++) {
struct polarizable_pt *pt_i = fr_i->polarizable_pts + j;
size_t idx = fr_i->polarizable_offset + j;
vec_t dr = {
xyz[0] - pt_i->x - swf.cell.x,
xyz[1] - pt_i->y - swf.cell.y,
xyz[2] - pt_i->z - swf.cell.z
};
double r = vec_len(&dr);
double r3 = r * r * r;
double r5 = r3 * r * r;
double t1 = vec_dot(&efp->indip[idx], &dr);
elec_field.x -= swf.swf * (efp->indip[idx].x / r3 -
3.0 * t1 * dr.x / r5);
elec_field.y -= swf.swf * (efp->indip[idx].y / r3 -
3.0 * t1 * dr.y / r5);
elec_field.z -= swf.swf * (efp->indip[idx].z / r3 -
3.0 * t1 * dr.z / r5);
}
}
if (efp->opts.terms & EFP_TERM_AI_POL) {
/* field due to nuclei from ab initio subsystem */
for (size_t i = 0; i < efp->n_ptc; i++) {
vec_t dr = vec_sub((const vec_t *)xyz, efp->ptc_xyz+i);
double r = vec_len(&dr);
double r3 = r * r * r;
elec_field.x += efp->ptc[i] * dr.x / r3;
elec_field.y += efp->ptc[i] * dr.y / r3;
elec_field.z += efp->ptc[i] * dr.z / r3;
}
}
*((vec_t *)field) = elec_field;
return EFP_RESULT_SUCCESS;
}
|
GB_apply_op.c
|
//------------------------------------------------------------------------------
// GB_apply_op: typecast and apply a unary operator to an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Cx = op ((xtype) Ax)
// Compare with GB_transpose_op.c
#include "GB_apply.h"
#ifndef GBCOMPACT
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
#endif
void GB_apply_op // apply a unary operator, Cx = op ((xtype) Ax)
(
GB_void *restrict Cx, // output array, of type op->ztype
const GrB_UnaryOp op, // operator to apply
const GB_void *restrict Ax, // input array, of type Atype
const GrB_Type Atype, // type of Ax
const int64_t anz, // size of Ax and Cx
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Cx != NULL) ;
ASSERT (Ax != NULL) ;
ASSERT (anz >= 0) ;
ASSERT (Atype != NULL) ;
ASSERT (op != NULL) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------------------
// FUTURE:: these operators could be renamed:
// GrB_AINV_BOOL and GxB_ABS_BOOL to GrB_IDENTITY_BOOL.
// GrB_MINV_BOOL to GxB_ONE_BOOL.
// GxB_ABS_UINT* to GrB_IDENTITY_UINT*.
// and then these workers would not need to be created.
#define GB_unop(op,zname,aname) GB_unop_ ## op ## zname ## aname
#define GB_WORKER(op,zname,ztype,aname,atype) \
{ \
GrB_Info info = GB_unop (op,zname,aname) ((ztype *) Cx, \
(const atype *) Ax, anz, nthreads) ; \
if (info == GrB_SUCCESS) return ; \
} \
break ;
//--------------------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------------------
#ifndef GBCOMPACT
#include "GB_unaryop_factory.c"
#endif
//--------------------------------------------------------------------------
// generic worker: typecast and apply an operator
//--------------------------------------------------------------------------
size_t asize = Atype->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
GB_cast_function
cast_A_to_X = GB_cast_factory (op->xtype->code, Atype->code) ;
GxB_unary_function fop = op->function ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// xwork = (xtype) Ax [p]
GB_void xwork [xsize] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork)
fop (Cx +(p*zsize), xwork) ;
}
}
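//------------------------------------------------------------------------------
// hedged sketch (not part of GraphBLAS): the generic worker in miniature
//------------------------------------------------------------------------------
// The loop above in plain C with the type sizes passed explicitly; cast and
// op mirror GB_cast_function and GxB_unary_function. All names here are
// illustrative.
static inline void GB_apply_generic_sketch
(
    void *Cx, size_t zsize,         // output array, entries of zsize bytes
    const void *Ax, size_t asize,   // input array, entries of asize bytes
    size_t xsize,                   // size of the op's input type
    int64_t anz,                    // number of entries
    void (*cast) (void *, const void *, size_t),
    void (*op) (void *, const void *)
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // xwork = (xtype) Ax [p]
        GB_void xwork [xsize] ;
        cast (xwork, ((const GB_void *) Ax) + p*asize, asize) ;
        // Cx [p] = op (xwork)
        op (((GB_void *) Cx) + p*zsize, xwork) ;
    }
}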
|
transformdyn-impl.h
|
//==================================================================================
// BSD 2-Clause License
//
// Copyright (c) 2014-2022, NJIT, Duality Technologies Inc. and other contributors
//
// All rights reserved.
//
// Author TPOC: [email protected]
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//==================================================================================
#ifndef __TRANSFORMDYN_IMPL_H__
#define __TRANSFORMDYN_IMPL_H__
// ATTENTION: this file contains implementations of the functions
// declared in math/bigintdyn/transformdyn.h and
// MUST be included in the end of math/bigintdyn/transformdyn.h ONLY
// and nowhere else
#include "utils/exception.h"
#include "utils/utilities.h"
#include "math/nbtheory.h"
#include <map>
#include <vector>
namespace bigintdyn {
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformFTTDyn<VecType>::m_cycloOrderInverseTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformFTTDyn<VecType>::m_cycloOrderInversePreconTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformFTTDyn<VecType>::m_rootOfUnityReverseTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformFTTDyn<VecType>::m_rootOfUnityInverseReverseTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformFTTDyn<VecType>::m_rootOfUnityPreconReverseTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformFTTDyn<VecType>::m_rootOfUnityInversePreconReverseTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType> ChineseRemainderTransformArbDyn<VecType>::m_cyclotomicPolyMap;
template <typename VecType>
std::map<typename VecType::Integer, VecType> ChineseRemainderTransformArbDyn<VecType>::m_cyclotomicPolyReverseNTTMap;
template <typename VecType>
std::map<typename VecType::Integer, VecType> ChineseRemainderTransformArbDyn<VecType>::m_cyclotomicPolyNTTMap;
template <typename VecType>
std::map<ModulusRoot<typename VecType::Integer>, VecType> BluesteinFFTDyn<VecType>::m_rootOfUnityTableByModulusRoot;
template <typename VecType>
std::map<ModulusRoot<typename VecType::Integer>, VecType>
BluesteinFFTDyn<VecType>::m_rootOfUnityInverseTableByModulusRoot;
template <typename VecType>
std::map<ModulusRoot<typename VecType::Integer>, VecType> BluesteinFFTDyn<VecType>::m_powersTableByModulusRoot;
template <typename VecType>
std::map<ModulusRootPair<typename VecType::Integer>, VecType> BluesteinFFTDyn<VecType>::m_RBTableByModulusRootPair;
template <typename VecType>
std::map<typename VecType::Integer, ModulusRoot<typename VecType::Integer>>
BluesteinFFTDyn<VecType>::m_defaultNTTModulusRoot;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformArbDyn<VecType>::m_rootOfUnityDivisionTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, VecType>
ChineseRemainderTransformArbDyn<VecType>::m_rootOfUnityDivisionInverseTableByModulus;
template <typename VecType>
std::map<typename VecType::Integer, typename VecType::Integer>
ChineseRemainderTransformArbDyn<VecType>::m_DivisionNTTModulus;
template <typename VecType>
std::map<typename VecType::Integer, typename VecType::Integer>
ChineseRemainderTransformArbDyn<VecType>::m_DivisionNTTRootOfUnity;
template <typename VecType>
std::map<usint, usint> ChineseRemainderTransformArbDyn<VecType>::m_nttDivisionDim;
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::ForwardTransformIterative(const VecType& element,
const VecType& rootOfUnityTable, VecType* result) {
usint n = element.GetLength();
if (result->GetLength() != n) {
OPENFHE_THROW(lbcrypto::math_error, "size of input element and size of output element not of same size");
}
auto modulus = element.GetModulus();
IntType mu = modulus.ComputeMu();
result->SetModulus(modulus);
usint msb = lbcrypto::GetMSB64(n - 1);
for (size_t i = 0; i < n; i++) {
(*result)[i] = element[lbcrypto::ReverseBits(i, msb)];
}
IntType omega, omegaFactor, oddVal, evenVal;
usint logm, i, j, indexEven, indexOdd;
usint logn = lbcrypto::GetMSB64(n - 1);
for (logm = 1; logm <= logn; logm++) {
// calculate the i indexes into the root table one time per loop
std::vector<usint> indexes(1 << (logm - 1));
for (i = 0; i < (usint)(1 << (logm - 1)); i++) {
indexes[i] = (i << (logn - logm));
}
for (j = 0; j < n; j = j + (1 << logm)) {
for (i = 0; i < (usint)(1 << (logm - 1)); i++) {
omega = rootOfUnityTable[indexes[i]];
indexEven = j + i;
indexOdd = indexEven + (1 << (logm - 1));
oddVal = (*result)[indexOdd];
omegaFactor = omega.ModMul(oddVal, modulus, mu);
evenVal = (*result)[indexEven];
oddVal = evenVal;
oddVal += omegaFactor;
if (oddVal >= modulus) {
oddVal -= modulus;
}
if (evenVal < omegaFactor) {
evenVal += modulus;
}
evenVal -= omegaFactor;
(*result)[indexEven] = oddVal;
(*result)[indexOdd] = evenVal;
}
}
}
return;
}
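//------------------------------------------------------------------------------
// Hedged standalone sketch (not part of OpenFHE): the same decimation-in-time
// butterfly as ForwardTransformIterative above, written for plain uint64_t
// with a modulus q < 2^63 so that products fit in __uint128_t (a GCC/Clang
// extension). rootPow[k] must hold w^k mod q for a primitive n-th root of
// unity w, k in [0, n/2); n must be a power of two. Assumes <cstdint> is in
// scope (<vector> is included above).
//------------------------------------------------------------------------------
inline void NttForwardSketch(std::vector<uint64_t>& a, const std::vector<uint64_t>& rootPow, uint64_t q) {
    const size_t n = a.size();
    // permute the input into bit-reversed order, as the loop above does with
    // lbcrypto::ReverseBits
    for (size_t i = 1, j = 0; i < n; ++i) {
        size_t bit = n >> 1;
        for (; j & bit; bit >>= 1)
            j ^= bit;
        j ^= bit;
        if (i < j) {
            uint64_t tmp = a[i];
            a[i] = a[j];
            a[j] = tmp;
        }
    }
    // butterflies: stage logm above corresponds to len = 2^logm here, and
    // indexes[i] = i << (logn - logm) corresponds to rootPow[i * (n / len)]
    for (size_t len = 2; len <= n; len <<= 1) {
        const size_t step = n / len;
        for (size_t j = 0; j < n; j += len) {
            for (size_t i = 0; i < len / 2; ++i) {
                uint64_t even = a[j + i];
                uint64_t odd = static_cast<uint64_t>(
                    static_cast<__uint128_t>(a[j + i + len / 2]) * rootPow[i * step] % q);
                a[j + i] = (even + odd) % q;                // even + w*odd
                a[j + i + len / 2] = (even + q - odd) % q;  // even - w*odd
            }
        }
    }
}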
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::InverseTransformIterative(const VecType& element,
const VecType& rootOfUnityInverseTable,
VecType* result) {
usint n = element.GetLength();
IntType modulus = element.GetModulus();
IntType mu = modulus.ComputeMu();
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(element, rootOfUnityInverseTable, result);
IntType cycloOrderInv(IntType(n).ModInverse(modulus));
for (usint i = 0; i < n; i++) {
(*result)[i].ModMulEq(cycloOrderInv, modulus, mu);
}
return;
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::ForwardTransformToBitReverseInPlace(const VecType& rootOfUnityTable,
VecType* element) {
usint n = element->GetLength();
IntType modulus = element->GetModulus();
IntType mu = modulus.ComputeMu();
usint i, m, j1, j2, indexOmega, indexLo, indexHi;
IntType omega, omegaFactor, loVal, hiVal, zero(0);
usint t = (n >> 1);
usint logt1 = lbcrypto::GetMSB64(t);
for (m = 1; m < n; m <<= 1) {
for (i = 0; i < m; ++i) {
j1 = i << logt1;
j2 = j1 + t;
indexOmega = m + i;
omega = rootOfUnityTable[indexOmega];
for (indexLo = j1; indexLo < j2; ++indexLo) {
indexHi = indexLo + t;
loVal = (*element)[indexLo];
omegaFactor = (*element)[indexHi];
omegaFactor.ModMulFastEq(omega, modulus, mu);
hiVal = loVal + omegaFactor;
if (hiVal >= modulus) {
hiVal -= modulus;
}
if (loVal < omegaFactor) {
loVal += modulus;
}
loVal -= omegaFactor;
(*element)[indexLo] = hiVal;
(*element)[indexHi] = loVal;
}
}
t >>= 1;
logt1--;
}
return;
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::ForwardTransformToBitReverse(const VecType& element,
const VecType& rootOfUnityTable,
VecType* result) {
usint n = element.GetLength();
if (result->GetLength() != n) {
OPENFHE_THROW(lbcrypto::math_error, "size of input element and size of output element not of same size");
}
IntType modulus = element.GetModulus();
IntType mu = modulus.ComputeMu();
result->SetModulus(modulus);
usint i, m, j1, j2, indexOmega, indexLo, indexHi;
IntType omega, omegaFactor, loVal, hiVal, zero(0);
for (i = 0; i < n; ++i) {
(*result)[i] = element[i];
}
usint t = (n >> 1);
usint logt1 = lbcrypto::GetMSB64(t);
for (m = 1; m < n; m <<= 1) {
for (i = 0; i < m; ++i) {
j1 = i << logt1;
j2 = j1 + t;
indexOmega = m + i;
omega = rootOfUnityTable[indexOmega];
for (indexLo = j1; indexLo < j2; ++indexLo) {
indexHi = indexLo + t;
loVal = (*result)[indexLo];
omegaFactor = (*result)[indexHi];
if (omegaFactor != zero) {
omegaFactor.ModMulFastEq(omega, modulus, mu);
hiVal = loVal + omegaFactor;
if (hiVal >= modulus) {
hiVal -= modulus;
}
if (loVal < omegaFactor) {
loVal += modulus;
}
loVal -= omegaFactor;
(*result)[indexLo] = hiVal;
(*result)[indexHi] = loVal;
}
else {
(*result)[indexHi] = loVal;
}
}
}
t >>= 1;
logt1--;
}
return;
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::ForwardTransformToBitReverseInPlace(const VecType& rootOfUnityTable,
const VecType& preconRootOfUnityTable,
VecType* element) {
usint n = element->GetLength();
IntType modulus = element->GetModulus();
uint32_t indexOmega, indexHi;
IntType preconOmega;
IntType omega, omegaFactor, loVal, hiVal, zero(0);
usint t = (n >> 1);
usint logt1 = lbcrypto::GetMSB64(t);
for (uint32_t m = 1; m < n; m <<= 1, t >>= 1, --logt1) {
uint32_t j1, j2;
for (uint32_t i = 0; i < m; ++i) {
j1 = i << logt1;
j2 = j1 + t;
indexOmega = m + i;
omega = rootOfUnityTable[indexOmega];
preconOmega = preconRootOfUnityTable[indexOmega];
for (uint32_t indexLo = j1; indexLo < j2; ++indexLo) {
indexHi = indexLo + t;
loVal = (*element)[indexLo];
omegaFactor = (*element)[indexHi];
omegaFactor.ModMulFastConstEq(omega, modulus, preconOmega);
hiVal = loVal + omegaFactor;
if (hiVal >= modulus) {
hiVal -= modulus;
}
if (loVal < omegaFactor) {
loVal += modulus;
}
loVal -= omegaFactor;
(*element)[indexLo] = hiVal;
(*element)[indexHi] = loVal;
}
}
}
return;
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::ForwardTransformToBitReverse(const VecType& element,
const VecType& rootOfUnityTable,
const VecType& preconRootOfUnityTable,
VecType* result) {
usint n = element.GetLength();
if (result->GetLength() != n) {
OPENFHE_THROW(lbcrypto::math_error, "size of input element and size of output element not of same size");
}
IntType modulus = element.GetModulus();
result->SetModulus(modulus);
for (uint32_t i = 0; i < n; ++i) {
(*result)[i] = element[i];
}
uint32_t indexOmega, indexHi;
IntType preconOmega;
IntType omega, omegaFactor, loVal, hiVal, zero(0);
usint t = (n >> 1);
usint logt1 = lbcrypto::GetMSB64(t);
for (uint32_t m = 1; m < n; m <<= 1, t >>= 1, --logt1) {
uint32_t j1, j2;
for (uint32_t i = 0; i < m; ++i) {
j1 = i << logt1;
j2 = j1 + t;
indexOmega = m + i;
omega = rootOfUnityTable[indexOmega];
preconOmega = preconRootOfUnityTable[indexOmega];
for (uint32_t indexLo = j1; indexLo < j2; ++indexLo) {
indexHi = indexLo + t;
loVal = (*result)[indexLo];
omegaFactor = (*result)[indexHi];
if (omegaFactor != zero) {
omegaFactor.ModMulFastConstEq(omega, modulus, preconOmega);
hiVal = loVal + omegaFactor;
if (hiVal >= modulus) {
hiVal -= modulus;
}
if (loVal < omegaFactor) {
loVal += modulus;
}
loVal -= omegaFactor;
(*result)[indexLo] = hiVal;
(*result)[indexHi] = loVal;
}
else {
(*result)[indexHi] = loVal;
}
}
}
}
return;
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::InverseTransformFromBitReverseInPlace(const VecType& rootOfUnityInverseTable,
const IntType& cycloOrderInv,
VecType* element) {
usint n = element->GetLength();
IntType modulus = element->GetModulus();
IntType mu = modulus.ComputeMu();
IntType loVal, hiVal, omega, omegaFactor;
usint i, m, j1, j2, indexOmega, indexLo, indexHi;
usint t = 1;
usint logt1 = 1;
for (m = (n >> 1); m >= 1; m >>= 1) {
for (i = 0; i < m; ++i) {
j1 = i << logt1;
j2 = j1 + t;
indexOmega = m + i;
omega = rootOfUnityInverseTable[indexOmega];
for (indexLo = j1; indexLo < j2; ++indexLo) {
indexHi = indexLo + t;
hiVal = (*element)[indexHi];
loVal = (*element)[indexLo];
omegaFactor = loVal;
if (omegaFactor < hiVal) {
omegaFactor += modulus;
}
omegaFactor -= hiVal;
loVal += hiVal;
if (loVal >= modulus) {
loVal -= modulus;
}
omegaFactor.ModMulFastEq(omega, modulus, mu);
(*element)[indexLo] = loVal;
(*element)[indexHi] = omegaFactor;
}
}
t <<= 1;
logt1++;
}
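    // normalize by the inverse transform's 1/n factor (the precomputed
    // cycloOrderInv), undoing the n-fold growth of the forward transform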
for (i = 0; i < n; i++) {
(*element)[i].ModMulFastEq(cycloOrderInv, modulus, mu);
}
return;
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::InverseTransformFromBitReverse(const VecType& element,
const VecType& rootOfUnityInverseTable,
const IntType& cycloOrderInv,
VecType* result) {
usint n = element.GetLength();
if (result->GetLength() != n) {
OPENFHE_THROW(lbcrypto::math_error, "size of input element and size of output element not of same size");
}
result->SetModulus(element.GetModulus());
for (usint i = 0; i < n; i++) {
(*result)[i] = element[i];
}
InverseTransformFromBitReverseInPlace(rootOfUnityInverseTable, cycloOrderInv, result);
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::InverseTransformFromBitReverseInPlace(
const VecType& rootOfUnityInverseTable, const VecType& preconRootOfUnityInverseTable, const IntType& cycloOrderInv,
const IntType& preconCycloOrderInv, VecType* element) {
usint n = element->GetLength();
IntType modulus = element->GetModulus();
IntType loVal, hiVal, omega, omegaFactor;
IntType preconOmega;
usint i, m, j1, j2, indexOmega, indexLo, indexHi;
usint t = 1;
usint logt1 = 1;
for (m = (n >> 1); m >= 1; m >>= 1) {
for (i = 0; i < m; ++i) {
j1 = i << logt1;
j2 = j1 + t;
indexOmega = m + i;
omega = rootOfUnityInverseTable[indexOmega];
preconOmega = preconRootOfUnityInverseTable[indexOmega];
for (indexLo = j1; indexLo < j2; ++indexLo) {
indexHi = indexLo + t;
hiVal = (*element)[indexHi];
loVal = (*element)[indexLo];
omegaFactor = loVal;
if (omegaFactor < hiVal) {
omegaFactor += modulus;
}
omegaFactor -= hiVal;
loVal += hiVal;
if (loVal >= modulus) {
loVal -= modulus;
}
omegaFactor.ModMulFastConstEq(omega, modulus, preconOmega);
(*element)[indexLo] = loVal;
(*element)[indexHi] = omegaFactor;
}
}
t <<= 1;
logt1++;
}
for (i = 0; i < n; i++) {
(*element)[i].ModMulFastConstEq(cycloOrderInv, modulus, preconCycloOrderInv);
}
}
template <typename VecType>
void NumberTheoreticTransformDyn<VecType>::InverseTransformFromBitReverse(
const VecType& element, const VecType& rootOfUnityInverseTable, const VecType& preconRootOfUnityInverseTable,
const IntType& cycloOrderInv, const IntType& preconCycloOrderInv, VecType* result) {
usint n = element.GetLength();
if (result->GetLength() != n) {
OPENFHE_THROW(lbcrypto::math_error, "sizes of input element and output element do not match");
}
result->SetModulus(element.GetModulus());
for (usint i = 0; i < n; i++) {
(*result)[i] = element[i];
}
InverseTransformFromBitReverseInPlace(rootOfUnityInverseTable, preconRootOfUnityInverseTable, cycloOrderInv,
preconCycloOrderInv, result);
return;
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::ForwardTransformToBitReverseInPlace(const IntType& rootOfUnity,
const usint CycloOrder,
VecType* element) {
if (rootOfUnity == IntType(1) || rootOfUnity == IntType(0)) {
return;
}
if (!lbcrypto::IsPowerOfTwo(CycloOrder)) {
OPENFHE_THROW(lbcrypto::math_error, "CyclotomicOrder is not a power of two");
}
usint CycloOrderHf = (CycloOrder >> 1);
if (element->GetLength() != CycloOrderHf) {
OPENFHE_THROW(lbcrypto::math_error, "element size must be equal to CyclotomicOrder / 2");
}
IntType modulus = element->GetModulus();
auto mapSearch = m_rootOfUnityReverseTableByModulus.find(modulus);
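// Twiddle tables are built lazily and cached per modulus; a cached table of
// the wrong length (stale cyclotomic order) forces recomputation.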
if (mapSearch == m_rootOfUnityReverseTableByModulus.end() || mapSearch->second.GetLength() != CycloOrderHf) {
PreCompute(rootOfUnity, CycloOrder, modulus);
}
// if (typeid(IntType) == typeid(NativeInteger)) {
// NumberTheoreticTransformDyn<VecType>().ForwardTransformToBitReverseInPlace(
// m_rootOfUnityReverseTableByModulus[modulus],
// m_rootOfUnityPreconReverseTableByModulus[modulus], element);
// } else {
NumberTheoreticTransformDyn<VecType>().ForwardTransformToBitReverseInPlace(
m_rootOfUnityReverseTableByModulus[modulus], element);
// }
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::ForwardTransformToBitReverse(const VecType& element,
const IntType& rootOfUnity,
const usint CycloOrder, VecType* result) {
if (rootOfUnity == IntType(1) || rootOfUnity == IntType(0)) {
*result = element;
return;
}
if (!lbcrypto::IsPowerOfTwo(CycloOrder)) {
OPENFHE_THROW(lbcrypto::math_error, "CyclotomicOrder is not a power of two");
}
usint CycloOrderHf = (CycloOrder >> 1);
if (result->GetLength() != CycloOrderHf) {
OPENFHE_THROW(lbcrypto::math_error, "result size must be equal to CyclotomicOrder / 2");
}
IntType modulus = element.GetModulus();
auto mapSearch = m_rootOfUnityReverseTableByModulus.find(modulus);
if (mapSearch == m_rootOfUnityReverseTableByModulus.end() || mapSearch->second.GetLength() != CycloOrderHf) {
PreCompute(rootOfUnity, CycloOrder, modulus);
}
// if (typeid(IntType) == typeid(NativeInteger)) {
// NumberTheoreticTransformDyn<VecType>().ForwardTransformToBitReverse(
// element, m_rootOfUnityReverseTableByModulus[modulus],
// m_rootOfUnityPreconReverseTableByModulus[modulus], result);
// } else {
NumberTheoreticTransformDyn<VecType>().ForwardTransformToBitReverse(
element, m_rootOfUnityReverseTableByModulus[modulus], result);
// }
return;
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::InverseTransformFromBitReverseInPlace(const IntType& rootOfUnity,
const usint CycloOrder,
VecType* element) {
if (rootOfUnity == IntType(1) || rootOfUnity == IntType(0)) {
return;
}
if (!lbcrypto::IsPowerOfTwo(CycloOrder)) {
OPENFHE_THROW(lbcrypto::math_error, "CyclotomicOrder is not a power of two");
}
usint CycloOrderHf = (CycloOrder >> 1);
if (element->GetLength() != CycloOrderHf) {
OPENFHE_THROW(lbcrypto::math_error, "element size must be equal to CyclotomicOrder / 2");
}
IntType modulus = element->GetModulus();
auto mapSearch = m_rootOfUnityReverseTableByModulus.find(modulus);
if (mapSearch == m_rootOfUnityReverseTableByModulus.end() || mapSearch->second.GetLength() != CycloOrderHf) {
PreCompute(rootOfUnity, CycloOrder, modulus);
}
usint msb = lbcrypto::GetMSB64(CycloOrderHf - 1);
// if (typeid(IntType) == typeid(NativeInteger)) {
// NumberTheoreticTransformDyn<VecType>().InverseTransformFromBitReverseInPlace(
// m_rootOfUnityInverseReverseTableByModulus[modulus],
// m_rootOfUnityInversePreconReverseTableByModulus[modulus],
// m_cycloOrderInverseTableByModulus[modulus][msb],
// m_cycloOrderInversePreconTableByModulus[modulus][msb], element);
// } else {
NumberTheoreticTransformDyn<VecType>().InverseTransformFromBitReverseInPlace(
m_rootOfUnityInverseReverseTableByModulus[modulus], m_cycloOrderInverseTableByModulus[modulus][msb], element);
// }
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::InverseTransformFromBitReverse(const VecType& element,
const IntType& rootOfUnity,
const usint CycloOrder, VecType* result) {
if (rootOfUnity == IntType(1) || rootOfUnity == IntType(0)) {
*result = element;
return;
}
if (!lbcrypto::IsPowerOfTwo(CycloOrder)) {
OPENFHE_THROW(lbcrypto::math_error, "CyclotomicOrder is not a power of two");
}
usint CycloOrderHf = (CycloOrder >> 1);
if (result->GetLength() != CycloOrderHf) {
OPENFHE_THROW(lbcrypto::math_error, "result size must be equal to CyclotomicOrder / 2");
}
IntType modulus = element.GetModulus();
auto mapSearch = m_rootOfUnityReverseTableByModulus.find(modulus);
if (mapSearch == m_rootOfUnityReverseTableByModulus.end() || mapSearch->second.GetLength() != CycloOrderHf) {
PreCompute(rootOfUnity, CycloOrder, modulus);
}
usint n = element.GetLength();
result->SetModulus(element.GetModulus());
for (usint i = 0; i < n; i++) {
(*result)[i] = element[i];
}
usint msb = lbcrypto::GetMSB64(CycloOrderHf - 1);
// if (typeid(IntType) == typeid(NativeInteger)) {
// NumberTheoreticTransformDyn<VecType>().InverseTransformFromBitReverseInPlace(
// m_rootOfUnityInverseReverseTableByModulus[modulus],
// m_rootOfUnityInversePreconReverseTableByModulus[modulus],
// m_cycloOrderInverseTableByModulus[modulus][msb],
// m_cycloOrderInversePreconTableByModulus[modulus][msb], result);
// } else {
NumberTheoreticTransformDyn<VecType>().InverseTransformFromBitReverseInPlace(
m_rootOfUnityInverseReverseTableByModulus[modulus], m_cycloOrderInverseTableByModulus[modulus][msb], result);
// }
return;
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::PreCompute(const IntType& rootOfUnity, const usint CycloOrder,
const IntType& modulus) {
// Half of cyclo order
usint CycloOrderHf = (CycloOrder >> 1);
auto mapSearch = m_rootOfUnityReverseTableByModulus.find(modulus);
if (mapSearch == m_rootOfUnityReverseTableByModulus.end() || mapSearch->second.GetLength() != CycloOrderHf) {
#pragma omp critical
{
IntType x(1), xinv(1);
usint msb = lbcrypto::GetMSB64(CycloOrderHf - 1);
IntType mu = modulus.ComputeMu();
VecType Table(CycloOrderHf, modulus);
VecType TableI(CycloOrderHf, modulus);
IntType rootOfUnityInverse = rootOfUnity.ModInverse(modulus);
usint iinv;
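// Fill both twiddle tables in bit-reversed order: slot ReverseBits(i)
// receives rootOfUnity^i and rootOfUnityInverse^i, matching the access
// pattern of the bit-reversed NTT routines above.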
for (usint i = 0; i < CycloOrderHf; i++) {
iinv = lbcrypto::ReverseBits(i, msb);
Table[iinv] = x;
TableI[iinv] = xinv;
x.ModMulEq(rootOfUnity, modulus, mu);
xinv.ModMulEq(rootOfUnityInverse, modulus, mu);
}
m_rootOfUnityReverseTableByModulus[modulus] = Table;
m_rootOfUnityInverseReverseTableByModulus[modulus] = TableI;
VecType TableCOI(msb + 1, modulus);
for (usint i = 0; i < msb + 1; i++) {
IntType coInv(IntType(1 << i).ModInverse(modulus));
TableCOI[i] = coInv;
}
m_cycloOrderInverseTableByModulus[modulus] = TableCOI;
// if (typeid(IntType) == typeid(NativeInteger)) {
// NativeInteger nativeModulus = modulus.ConvertToInt();
// VecType preconTable(CycloOrderHf, nativeModulus);
// VecType preconTableI(CycloOrderHf, nativeModulus);
// for (usint i = 0; i < CycloOrderHf; i++) {
// preconTable[i] =
// NativeInteger(
// m_rootOfUnityReverseTableByModulus[modulus][i].ConvertToInt())
// .PrepModMulConst(nativeModulus);
// preconTableI[i] =
// NativeInteger(
// m_rootOfUnityInverseReverseTableByModulus[modulus][i]
// .ConvertToInt())
// .PrepModMulConst(nativeModulus);
// }
// VecType preconTableCOI(msb + 1, nativeModulus);
// for (usint i = 0; i < msb + 1; i++) {
// preconTableCOI[i] =
// NativeInteger(
// m_cycloOrderInverseTableByModulus[modulus][i].ConvertToInt())
// .PrepModMulConst(nativeModulus);
// }
// m_rootOfUnityPreconReverseTableByModulus[modulus] = preconTable;
// m_rootOfUnityInversePreconReverseTableByModulus[modulus] = preconTableI;
// m_cycloOrderInversePreconTableByModulus[modulus] = preconTableCOI;
// }
}
}
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::PreCompute(std::vector<IntType>& rootOfUnity, const usint CycloOrder,
std::vector<IntType>& moduliiChain) {
usint numOfRootU = rootOfUnity.size();
usint numModulii = moduliiChain.size();
if (numOfRootU != numModulii) {
OPENFHE_THROW(lbcrypto::math_error, "sizes of the root-of-unity vector and the moduli chain do not match");
}
for (usint i = 0; i < numOfRootU; ++i) {
IntType currentRoot(rootOfUnity[i]);
IntType currentMod(moduliiChain[i]);
PreCompute(currentRoot, CycloOrder, currentMod);
}
}
template <typename VecType>
void ChineseRemainderTransformFTTDyn<VecType>::Reset() {
m_cycloOrderInverseTableByModulus.clear();
m_cycloOrderInversePreconTableByModulus.clear();
m_rootOfUnityReverseTableByModulus.clear();
m_rootOfUnityInverseReverseTableByModulus.clear();
m_rootOfUnityPreconReverseTableByModulus.clear();
m_rootOfUnityInversePreconReverseTableByModulus.clear();
}
template <typename VecType>
void BluesteinFFTDyn<VecType>::PreComputeDefaultNTTModulusRoot(usint cycloOrder, const IntType& modulus) {
usint nttDim = pow(2, ceil(log2(2 * cycloOrder - 1)));
const auto nttModulus = lbcrypto::FirstPrime<IntType>(log2(nttDim) + 2 * modulus.GetMSB(), nttDim);
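// nttDim is the next power of two >= 2*cycloOrder - 1, the length needed to
// embed Bluestein's convolution; the NTT modulus is a prime of
// log2(nttDim) + 2*MSB(modulus) bits, large enough to hold the exact
// convolution coefficients (< nttDim * modulus^2), and FirstPrime's second
// argument requests one admitting nttDim-th roots of unity.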
const auto nttRoot = RootOfUnity(nttDim, nttModulus);
const ModulusRoot<IntType> nttModulusRoot = {nttModulus, nttRoot};
m_defaultNTTModulusRoot[modulus] = nttModulusRoot;
PreComputeRootTableForNTT(cycloOrder, nttModulusRoot);
}
template <typename VecType>
void BluesteinFFTDyn<VecType>::PreComputeRootTableForNTT(usint cyclotoOrder,
const ModulusRoot<IntType>& nttModulusRoot) {
usint nttDim = pow(2, ceil(log2(2 * cyclotoOrder - 1)));
const auto& nttModulus = nttModulusRoot.first;
const auto& nttRoot = nttModulusRoot.second;
IntType root(nttRoot);
auto rootInv = root.ModInverse(nttModulus);
usint nttDimHf = (nttDim >> 1);
VecType rootTable(nttDimHf, nttModulus);
VecType rootTableInverse(nttDimHf, nttModulus);
IntType x(1);
for (usint i = 0; i < nttDimHf; i++) {
rootTable[i] = x;
x = x.ModMul(root, nttModulus);
}
x = 1;
for (usint i = 0; i < nttDimHf; i++) {
rootTableInverse[i] = x;
x = x.ModMul(rootInv, nttModulus);
}
m_rootOfUnityTableByModulusRoot[nttModulusRoot] = rootTable;
m_rootOfUnityInverseTableByModulusRoot[nttModulusRoot] = rootTableInverse;
}
template <typename VecType>
void BluesteinFFTDyn<VecType>::PreComputePowers(usint cycloOrder, const ModulusRoot<IntType>& modulusRoot) {
const auto& modulus = modulusRoot.first;
const auto& root = modulusRoot.second;
VecType powers(cycloOrder, modulus);
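// Bluestein chirp: powers[i] = root^(i*i mod 2*cycloOrder); reducing the
// exponent mod 2*cycloOrder assumes root's order divides 2*cycloOrder.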
powers[0] = 1;
for (usint i = 1; i < cycloOrder; i++) {
auto iSqr = (i * i) % (2 * cycloOrder);
auto val = root.ModExp(IntType(iSqr), modulus);
powers[i] = val;
}
m_powersTableByModulusRoot[modulusRoot] = powers;
}
template <typename VecType>
void BluesteinFFTDyn<VecType>::PreComputeRBTable(usint cycloOrder, const ModulusRootPair<IntType>& modulusRootPair) {
const auto& modulusRoot = modulusRootPair.first;
const auto& modulus = modulusRoot.first;
const auto& root = modulusRoot.second;
const auto rootInv = root.ModInverse(modulus);
const auto& nttModulusRoot = modulusRootPair.second;
const auto& nttModulus = nttModulusRoot.first;
// const auto &nttRoot = nttModulusRoot.second;
// assumes rootTable is precomputed
const auto& rootTable = m_rootOfUnityTableByModulusRoot[nttModulusRoot];
usint nttDim = pow(2, ceil(log2(2 * cycloOrder - 1)));
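// b is the symmetric Bluestein filter centered at index cycloOrder-1, with
// b[cycloOrder-1 +/- i] = rootInv^(i*i mod 2*cycloOrder); it is zero-padded
// to nttDim and transformed once so the NTT-domain table RB can be reused.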
VecType b(2 * cycloOrder - 1, modulus);
b[cycloOrder - 1] = 1;
for (usint i = 1; i < cycloOrder; i++) {
auto iSqr = (i * i) % (2 * cycloOrder);
auto val = rootInv.ModExp(IntType(iSqr), modulus);
b[cycloOrder - 1 + i] = val;
b[cycloOrder - 1 - i] = val;
}
auto Rb = PadZeros(b, nttDim);
Rb.SetModulus(nttModulus);
VecType RB(nttDim);
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(Rb, rootTable, &RB);
m_RBTableByModulusRootPair[modulusRootPair] = RB;
}
template <typename VecType>
VecType BluesteinFFTDyn<VecType>::ForwardTransform(const VecType& element, const IntType& root,
const usint cycloOrder) {
const auto& modulus = element.GetModulus();
const auto& nttModulusRoot = m_defaultNTTModulusRoot[modulus];
return ForwardTransform(element, root, cycloOrder, nttModulusRoot);
}
template <typename VecType>
VecType BluesteinFFTDyn<VecType>::ForwardTransform(const VecType& element, const IntType& root, const usint cycloOrder,
const ModulusRoot<IntType>& nttModulusRoot) {
if (element.GetLength() != cycloOrder) {
OPENFHE_THROW(lbcrypto::math_error, "size of element vector must equal the cyclotomic order");
}
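// Bluestein: chirp-premultiply by powers, zero-pad to nttDim, compute the
// cyclic convolution with the precomputed filter RB via forward NTT ->
// pointwise product -> inverse NTT, window out indices cycloOrder-1 ..
// 2*(cycloOrder-1), reduce mod modulus, and chirp-postmultiply by powers.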
const auto& modulus = element.GetModulus();
const ModulusRoot<IntType> modulusRoot = {modulus, root};
const VecType& powers = m_powersTableByModulusRoot[modulusRoot];
const auto& nttModulus = nttModulusRoot.first;
// assumes rootTable is precomputed
const auto& rootTable = m_rootOfUnityTableByModulusRoot[nttModulusRoot];
const auto& rootTableInverse =
m_rootOfUnityInverseTableByModulusRoot[nttModulusRoot]; // assumes rootTableInverse is precomputed
VecType x = element.ModMul(powers);
usint nttDim = pow(2, ceil(log2(2 * cycloOrder - 1)));
auto Ra = PadZeros(x, nttDim);
Ra.SetModulus(nttModulus);
VecType RA(nttDim);
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(Ra, rootTable, &RA);
const ModulusRootPair<IntType> modulusRootPair = {modulusRoot, nttModulusRoot};
const auto& RB = m_RBTableByModulusRootPair[modulusRootPair];
auto RC = RA.ModMul(RB);
VecType Rc(nttDim);
NumberTheoreticTransformDyn<VecType>().InverseTransformIterative(RC, rootTableInverse, &Rc);
auto resizeRc = Resize(Rc, cycloOrder - 1, 2 * (cycloOrder - 1));
resizeRc.SetModulus(modulus);
resizeRc.ModEq(modulus);
auto result = resizeRc.ModMul(powers);
return result;
}
template <typename VecType>
VecType BluesteinFFTDyn<VecType>::PadZeros(const VecType& a, const usint finalSize) {
usint s = a.GetLength();
VecType result(finalSize, a.GetModulus());
for (usint i = 0; i < s; i++) {
result[i] = a[i];
}
for (usint i = a.GetLength(); i < finalSize; i++) {
result[i] = IntType(0);
}
return result;
}
template <typename VecType>
VecType BluesteinFFTDyn<VecType>::Resize(const VecType& a, usint lo, usint hi) {
VecType result(hi - lo + 1, a.GetModulus());
for (usint i = lo, j = 0; i <= hi; i++, j++) {
result[j] = a[i];
}
return result;
}
template <typename VecType>
void BluesteinFFTDyn<VecType>::Reset() {
m_rootOfUnityTableByModulusRoot.clear();
m_rootOfUnityInverseTableByModulusRoot.clear();
m_powersTableByModulusRoot.clear();
m_RBTableByModulusRootPair.clear();
m_defaultNTTModulusRoot.clear();
}
template <typename VecType>
void ChineseRemainderTransformArbDyn<VecType>::SetCylotomicPolynomial(const VecType& poly, const IntType& mod) {
m_cyclotomicPolyMap[mod] = poly;
}
template <typename VecType>
void ChineseRemainderTransformArbDyn<VecType>::PreCompute(const usint cyclotoOrder, const IntType& modulus) {
BluesteinFFTDyn<VecType>().PreComputeDefaultNTTModulusRoot(cyclotoOrder, modulus);
}
template <typename VecType>
void ChineseRemainderTransformArbDyn<VecType>::SetPreComputedNTTModulus(usint cyclotoOrder, const IntType& modulus,
const IntType& nttModulus,
const IntType& nttRoot) {
const ModulusRoot<IntType> nttModulusRoot = {nttModulus, nttRoot};
BluesteinFFTDyn<VecType>().PreComputeRootTableForNTT(cyclotoOrder, nttModulusRoot);
}
template <typename VecType>
void ChineseRemainderTransformArbDyn<VecType>::SetPreComputedNTTDivisionModulus(usint cyclotoOrder,
const IntType& modulus,
const IntType& nttMod,
const IntType& nttRootBig) {
OPENFHE_DEBUG_FLAG(false);
usint n = lbcrypto::GetTotient(cyclotoOrder);
OPENFHE_DEBUG("GetTotient(" << cyclotoOrder << ")= " << n);
usint power = cyclotoOrder - n;
m_nttDivisionDim[cyclotoOrder] = 2 * std::pow(2, ceil(log2(power)));
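// power = cycloOrder - n is the number of coefficients above x^n that must
// be folded back; the division NTT operates on vectors of length
// 2 * next_pow2(power), so cyclic convolutions of two length-power operands
// in Drop() are free of wrap-around.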
usint nttDimBig = std::pow(2, ceil(log2(2 * cyclotoOrder - 1)));
// Computes the root of unity for the division NTT based on the root of unity
// for regular NTT
IntType nttRoot = nttRootBig.ModExp(IntType(nttDimBig / m_nttDivisionDim[cyclotoOrder]), nttMod);
m_DivisionNTTModulus[modulus] = nttMod;
m_DivisionNTTRootOfUnity[modulus] = nttRoot;
// Part 0: set up rootTable and inverse rootTable
usint nttDim = m_nttDivisionDim[cyclotoOrder];
IntType root(nttRoot);
auto rootInv = root.ModInverse(nttMod);
usint nttDimHf = (nttDim >> 1);
VecType rootTable(nttDimHf, nttMod);
VecType rootTableInverse(nttDimHf, nttMod);
IntType x(1);
for (usint i = 0; i < nttDimHf; i++) {
rootTable[i] = x;
x = x.ModMul(root, nttMod);
}
x = 1;
for (usint i = 0; i < nttDimHf; i++) {
rootTableInverse[i] = x;
x = x.ModMul(rootInv, nttMod);
}
m_rootOfUnityDivisionTableByModulus[nttMod] = rootTable;
m_rootOfUnityDivisionInverseTableByModulus[nttMod] = rootTableInverse;
// end of Part 0
// Part 1
const auto& RevCPM = InversePolyMod(m_cyclotomicPolyMap[modulus], modulus, power);
auto RevCPMPadded = BluesteinFFTDyn<VecType>().PadZeros(RevCPM, nttDim);
RevCPMPadded.SetModulus(nttMod);
// end of Part 1
VecType RA(nttDim);
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(RevCPMPadded, rootTable, &RA);
m_cyclotomicPolyReverseNTTMap[modulus] = RA;
const auto& cycloPoly = m_cyclotomicPolyMap[modulus];
VecType QForwardTransform(nttDim, nttMod);
for (usint i = 0; i < cycloPoly.GetLength(); i++) {
QForwardTransform[i] = cycloPoly[i];
}
VecType QFwdResult(nttDim);
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(QForwardTransform, rootTable, &QFwdResult);
m_cyclotomicPolyNTTMap[modulus] = QFwdResult;
}
template <typename VecType>
VecType ChineseRemainderTransformArbDyn<VecType>::InversePolyMod(const VecType& cycloPoly, const IntType& modulus,
usint power) {
VecType result(power, modulus);
usint r = ceil(log2(power));
VecType h(1, modulus); // h is a unit polynomial
h[0] = 1;
// Precompute the Barrett mu parameter
IntType mu = modulus.ComputeMu();
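// Newton (Hensel lifting) iteration for the inverse of cycloPoly mod x^power:
// starting from h = 1, each of the r = ceil(log2(power)) steps doubles the
// precision via h <- (2*h - cycloPoly*h^2) mod x^(2^(i+1)).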
for (usint i = 0; i < r; i++) {
usint qDegree = std::pow(2, i + 1);
VecType q(qDegree + 1, modulus); // q = x^(2^(i+1))
q[qDegree] = 1;
auto hSquare = PolynomialMultiplication(h, h);
auto a = h * IntType(2);
auto b = PolynomialMultiplication(hSquare, cycloPoly);
// b = 2h - gh^2
for (usint j = 0; j < b.GetLength(); j++) {
if (j < a.GetLength()) {
b[j] = a[j].ModSub(b[j], modulus, mu);
}
else {
b[j] = modulus.ModSub(b[j], modulus, mu);
}
}
h = PolyMod(b, q, modulus);
}
// take modulo x^power
for (usint i = 0; i < power; i++) {
result[i] = h[i];
}
return result;
}
template <typename VecType>
VecType ChineseRemainderTransformArbDyn<VecType>::ForwardTransform(const VecType& element, const IntType& root,
const IntType& nttModulus, const IntType& nttRoot,
const usint cycloOrder) {
usint phim = lbcrypto::GetTotient(cycloOrder);
if (element.GetLength() != phim) {
OPENFHE_THROW(lbcrypto::math_error, "element size should be equal to phim");
}
const auto& modulus = element.GetModulus();
const ModulusRoot<IntType> modulusRoot = {modulus, root};
const ModulusRoot<IntType> nttModulusRoot = {nttModulus, nttRoot};
const ModulusRootPair<IntType> modulusRootPair = {modulusRoot, nttModulusRoot};
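// Build any missing Bluestein tables lazily; the critical section prevents
// concurrent first calls from racing on the shared static maps.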
#pragma omp critical
{
if (BluesteinFFTDyn<VecType>::m_rootOfUnityTableByModulusRoot[nttModulusRoot].GetLength() == 0) {
BluesteinFFTDyn<VecType>().PreComputeRootTableForNTT(cycloOrder, nttModulusRoot);
}
if (BluesteinFFTDyn<VecType>::m_powersTableByModulusRoot[modulusRoot].GetLength() == 0) {
BluesteinFFTDyn<VecType>().PreComputePowers(cycloOrder, modulusRoot);
}
if (BluesteinFFTDyn<VecType>::m_RBTableByModulusRootPair[modulusRootPair].GetLength() == 0) {
BluesteinFFTDyn<VecType>().PreComputeRBTable(cycloOrder, modulusRootPair);
}
}
VecType inputToBluestein = Pad(element, cycloOrder, true);
auto outputBluestein =
BluesteinFFTDyn<VecType>().ForwardTransform(inputToBluestein, root, cycloOrder, nttModulusRoot);
VecType output = Drop(outputBluestein, cycloOrder, true, nttModulus, nttRoot);
return output;
}
template <typename VecType>
VecType ChineseRemainderTransformArbDyn<VecType>::InverseTransform(const VecType& element, const IntType& root,
const IntType& nttModulus, const IntType& nttRoot,
const usint cycloOrder) {
usint phim = lbcrypto::GetTotient(cycloOrder);
if (element.GetLength() != phim) {
OPENFHE_THROW(lbcrypto::math_error, "element size should be equal to phim");
}
const auto& modulus = element.GetModulus();
auto rootInverse(root.ModInverse(modulus));
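// The inverse transform reuses the forward Bluestein machinery with the
// inverse root, then scales by cycloOrder^{-1} mod modulus (applied below).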
const ModulusRoot<IntType> modulusRootInverse = {modulus, rootInverse};
const ModulusRoot<IntType> nttModulusRoot = {nttModulus, nttRoot};
const ModulusRootPair<IntType> modulusRootPair = {modulusRootInverse, nttModulusRoot};
#pragma omp critical
{
if (BluesteinFFTDyn<VecType>::m_rootOfUnityTableByModulusRoot[nttModulusRoot].GetLength() == 0) {
BluesteinFFTDyn<VecType>().PreComputeRootTableForNTT(cycloOrder, nttModulusRoot);
}
if (BluesteinFFTDyn<VecType>::m_powersTableByModulusRoot[modulusRootInverse].GetLength() == 0) {
BluesteinFFTDyn<VecType>().PreComputePowers(cycloOrder, modulusRootInverse);
}
if (BluesteinFFTDyn<VecType>::m_RBTableByModulusRootPair[modulusRootPair].GetLength() == 0) {
BluesteinFFTDyn<VecType>().PreComputeRBTable(cycloOrder, modulusRootPair);
}
}
VecType inputToBluestein = Pad(element, cycloOrder, false);
auto outputBluestein =
BluesteinFFTDyn<VecType>().ForwardTransform(inputToBluestein, rootInverse, cycloOrder, nttModulusRoot);
auto cyclotomicInverse((IntType(cycloOrder)).ModInverse(modulus));
outputBluestein = outputBluestein * cyclotomicInverse;
VecType output = Drop(outputBluestein, cycloOrder, false, nttModulus, nttRoot);
return output;
}
template <typename VecType>
VecType ChineseRemainderTransformArbDyn<VecType>::Pad(const VecType& element, const usint cycloOrder, bool forward) {
usint n = lbcrypto::GetTotient(cycloOrder);
const auto& modulus = element.GetModulus();
VecType inputToBluestein(cycloOrder, modulus);
if (forward) { // Forward transform padding
for (usint i = 0; i < n; i++) {
inputToBluestein[i] = element[i];
}
}
else { // Inverse transform padding
auto tList = lbcrypto::GetTotientList(cycloOrder);
usint i = 0;
for (auto& coprime : tList) {
inputToBluestein[coprime] = element[i++];
}
}
return inputToBluestein;
}
template <typename VecType>
VecType ChineseRemainderTransformArbDyn<VecType>::Drop(const VecType& element, const usint cycloOrder, bool forward,
const IntType& bigMod, const IntType& bigRoot) {
usint n = lbcrypto::GetTotient(cycloOrder);
const auto& modulus = element.GetModulus();
VecType output(n, modulus);
if (forward) { // Forward transform drop
auto tList = lbcrypto::GetTotientList(cycloOrder);
for (usint i = 0; i < n; i++) {
output[i] = element[tList[i]];
}
}
else { // Inverse transform drop
if ((n + 1) == cycloOrder) {
IntType mu = modulus.ComputeMu(); // Precompute the Barrett mu parameter
// cycloOrder is prime: Reduce mod Phi_{n+1}(x)
// Reduction involves subtracting the coeff of x^n from all terms
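// e.g. for cycloOrder = 5 (n = 4): Phi_5(x) = x^4+x^3+x^2+x+1, so
// x^4 = -(x^3+x^2+x+1) and output[i] = element[i] - element[n] for i < n.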
auto coeff_n = element[n];
for (usint i = 0; i < n; i++) {
output[i] = element[i].ModSub(coeff_n, modulus, mu);
}
}
else if ((n + 1) * 2 == cycloOrder) {
IntType mu = modulus.ComputeMu(); // Precompute the Barrett mu parameter
// cycloOrder is 2*prime: two-step reduction.
// First reduce mod x^(n+1)+1 (= (x+1)*Phi_{2*(n+1)}(x)):
// subtract the coefficient of x^(i+n+1) from that of x^i.
for (usint i = 0; i < n; i++) {
auto coeff_i = element[i];
auto coeff_ip = element[i + n + 1];
output[i] = coeff_i.ModSub(coeff_ip, modulus, mu);
}
auto coeff_n = element[n].ModSub(element[2 * n + 1], modulus, mu);
// Now reduce mod Phi_{2*(n+1)}(x)
// Similar to the prime case but with alternating signs
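// e.g. for cycloOrder = 10 (n = 4): Phi_10(x) = x^4-x^3+x^2-x+1, so
// x^4 = x^3-x^2+x-1 and folding coeff_n*x^4 back subtracts coeff_n at the
// even indices and adds it at the odd ones, as the branch below does.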
for (usint i = 0; i < n; i++) {
if (i % 2 == 0) {
output[i].ModSubEq(coeff_n, modulus, mu);
}
else {
output[i].ModAddEq(coeff_n, modulus, mu);
}
}
}
else {
// precompute root of unity tables for division NTT
if ((m_rootOfUnityDivisionTableByModulus[bigMod].GetLength() == 0) ||
(m_DivisionNTTModulus[modulus] != bigMod)) {
SetPreComputedNTTDivisionModulus(cycloOrder, modulus, bigMod, bigRoot);
}
// cycloOrder is arbitrary
// auto output = PolyMod(element, this->m_cyclotomicPolyMap[modulus],
// modulus);
const auto& nttMod = m_DivisionNTTModulus[modulus];
const auto& rootTable = m_rootOfUnityDivisionTableByModulus[nttMod];
VecType aPadded2(m_nttDivisionDim[cycloOrder], nttMod);
// gather the coefficients above x^n, reversed, as input to the division NTT
usint power = cycloOrder - n;
for (usint i = n; i < element.GetLength(); i++) {
aPadded2[power - (i - n) - 1] = element[i];
}
VecType A(m_nttDivisionDim[cycloOrder]);
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(aPadded2, rootTable, &A);
auto AB = A * m_cyclotomicPolyReverseNTTMap[modulus];
const auto& rootTableInverse = m_rootOfUnityDivisionInverseTableByModulus[nttMod];
VecType a(m_nttDivisionDim[cycloOrder]);
NumberTheoreticTransformDyn<VecType>().InverseTransformIterative(AB, rootTableInverse, &a);
VecType quotient(m_nttDivisionDim[cycloOrder], modulus);
for (usint i = 0; i < power; i++) {
quotient[i] = a[i];
}
quotient.ModEq(modulus);
quotient.SetModulus(nttMod);
VecType newQuotient(m_nttDivisionDim[cycloOrder]);
NumberTheoreticTransformDyn<VecType>().ForwardTransformIterative(quotient, rootTable, &newQuotient);
newQuotient *= m_cyclotomicPolyNTTMap[modulus];
VecType newQuotient2(m_nttDivisionDim[cycloOrder]);
NumberTheoreticTransformDyn<VecType>().InverseTransformIterative(newQuotient, rootTableInverse,
&newQuotient2);
newQuotient2.SetModulus(modulus);
newQuotient2.ModEq(modulus);
IntType mu = modulus.ComputeMu(); // Precompute the Barrett mu parameter
for (usint i = 0; i < n; i++) {
output[i] = element[i].ModSub(newQuotient2[cycloOrder - 1 - i], modulus, mu);
}
}
}
return output;
}
template <typename VecType>
void ChineseRemainderTransformArbDyn<VecType>::Reset() {
m_cyclotomicPolyMap.clear();
m_cyclotomicPolyReverseNTTMap.clear();
m_cyclotomicPolyNTTMap.clear();
m_rootOfUnityDivisionTableByModulus.clear();
m_rootOfUnityDivisionInverseTableByModulus.clear();
m_DivisionNTTModulus.clear();
m_DivisionNTTRootOfUnity.clear();
m_nttDivisionDim.clear();
BluesteinFFTDyn<VecType>().Reset();
}
} // namespace bigintdyn
#endif // __TRANSFORMDYN_IMPL_H__
|
nvptx_target_printf_codegen.c
|
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// expected-no-diagnostics
extern int printf(const char *, ...);
// Check a simple call to printf end-to-end.
int CheckSimple() {
#pragma omp target
{
// printf in master-only basic block.
const char* fmt = "%d %lld %f";
printf(fmt, 1, 2ll, 3.0);
}
return 0;
}
void CheckNoArgs() {
#pragma omp target
{
// printf in master-only basic block.
printf("hello, world!");
}
}
// Check that printf's alloca happens in the entry block, not inside the if
// statement.
int foo;
void CheckAllocaIsInEntryBlock() {
#pragma omp target
{
if (foo) {
printf("%d", 42);
}
}
}
//
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckSimple_l13
// CHECK-64-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[FMT:%.*]] = alloca i8*, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS:%.*]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i64 0, i64 0), i8** [[FMT]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load i8*, i8** [[FMT]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 0
// CHECK-64-NEXT: store i32 1, i32* [[TMP2]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 1
// CHECK-64-NEXT: store i64 2, i64* [[TMP3]], align 8
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 2
// CHECK-64-NEXT: store double 3.000000e+00, double* [[TMP4]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = bitcast %printf_args* [[TMP]] to i8*
// CHECK-64-NEXT: [[TMP6:%.*]] = call i32 @__llvm_omp_vprintf(i8* [[TMP1]], i8* [[TMP5]], i32 24)
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckNoArgs_l25
// CHECK-64-SAME: () #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str1, i64 0, i64 0), i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckAllocaIsInEntryBlock_l36
// CHECK-64-SAME: (i64 [[FOO:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[FOO_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS_0:%.*]], align 8
// CHECK-64-NEXT: store i64 [[FOO]], i64* [[FOO_ADDR]], align 8
// CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[FOO_ADDR]] to i32*
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK-64-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-64-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-64: if.then:
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS_0]], %printf_args.0* [[TMP]], i32 0, i32 0
// CHECK-64-NEXT: store i32 42, i32* [[TMP2]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = bitcast %printf_args.0* [[TMP]] to i8*
// CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i64 0, i64 0), i8* [[TMP3]], i32 4)
// CHECK-64-NEXT: br label [[IF_END]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: if.end:
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
//
//
//
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckSimple_l13
// CHECK-32-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[FMT:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS:%.*]], align 8
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8** [[FMT]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i8*, i8** [[FMT]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 0
// CHECK-32-NEXT: store i32 1, i32* [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 1
// CHECK-32-NEXT: store i64 2, i64* [[TMP3]], align 8
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 2
// CHECK-32-NEXT: store double 3.000000e+00, double* [[TMP4]], align 8
// CHECK-32-NEXT: [[TMP5:%.*]] = bitcast %printf_args* [[TMP]] to i8*
// CHECK-32-NEXT: [[TMP6:%.*]] = call i32 @__llvm_omp_vprintf(i8* [[TMP1]], i8* [[TMP5]], i32 24)
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckNoArgs_l25
// CHECK-32-SAME: () #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str1, i32 0, i32 0), i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckAllocaIsInEntryBlock_l36
// CHECK-32-SAME: (i32 [[FOO:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[FOO_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS_0:%.*]], align 8
// CHECK-32-NEXT: store i32 [[FOO]], i32* [[FOO_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[FOO_ADDR]], align 4
// CHECK-32-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-32-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-32: if.then:
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS_0]], %printf_args.0* [[TMP]], i32 0, i32 0
// CHECK-32-NEXT: store i32 42, i32* [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = bitcast %printf_args.0* [[TMP]] to i8*
// CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i32 0, i32 0), i8* [[TMP3]], i32 4)
// CHECK-32-NEXT: br label [[IF_END]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: if.end:
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
//
|
lastprivatemissing-var-yes.c
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// x: not live-in, but live-out (read after the loop, in the outer scope)
// loop-carried output dependence: x = ... ; every iteration writes x, so the
// final value depends on which iteration executes last.
// Solution: can be parallelized using lastprivate(x)
//
// Semantics of lastprivate(x):
// it causes the corresponding original list item to be updated after the end of the region.
// The compiler/runtime copies the private value back to the shared variable
// from the sequentially last iteration.
// Without lastprivate(x), there is a race condition on x.
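// For illustration only (a sketch of the fix described above), the race-free
// form of the loop in main() would be:
//   #pragma omp parallel for private(i) lastprivate(x)
//   for (i = 0; i < len; i++)
//     x = i;
// so that the value of x from the final iteration (i == len-1) is copied back
// after the loop. The code below deliberately omits lastprivate to exhibit
// the race.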
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,x;
int len = 10000;
if (argc>1)
len = atoi(argv[1]);
#pragma omp parallel for private (i)
for (i=0;i<len;i++)
x=i;
printf("x=%d",x);
return 0;
}
|
DenseMatrix.h
|
//=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2012-2017 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
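// Each thread handles one rowsPerThread x colsPerThread tile; when SIMD is
// enabled the per-thread shares are rounded up to a multiple of SIMDSIZE so
// tile boundaries preserve the alignment required by the aligned submatrix
// views below.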
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( size_t i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
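// Tile partitioning identical to smpAssign_backend above; only the per-tile
// operation differs (addAssign instead of assign).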
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
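// In contrast to the dense matrix backend, no SIMD padding of the block sizes is required
// here, since the addition assignment of a sparse matrix traverses its non-zero elements and
// is not vectorized.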
#pragma omp for schedule(dynamic,1) nowait
for( size_t i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
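// Fall back to a serial addition assignment in case a serial section is active or the
// right-hand side matrix cannot be assigned in parallel (e.g. because it is presumably too
// small for an effective parallelization).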
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpAddAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( size_t i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment of a matrix to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpSubAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP Schur product assignment of a dense matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix for the Schur product.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP Schur product assignment
// of a dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpSchurAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP Schur product assignment of a sparse matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix for the Schur product.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP Schur product assignment
// of a sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpSchurAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( size_t i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the default OpenMP-based SMP Schur product assignment to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case either of the two matrix operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
schurAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
schurAssign( ~lhs, ~rhs );
}
else {
#pragma omp parallel shared( lhs, rhs )
smpSchurAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< IsDenseMatrix<MT1> >
smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
shape_registration.c
|
//==============================================================================
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
//==============================================================================
#include "shape_registration.h"
//==============================================================================
size_t bandPts = 0, clipBoxPts = 0;
//==============================================================================
// Stores generated random points
typedef struct {
size_t k, i, j;
} Random3DPoints;
// stores calc. distances from generated points
typedef struct {
dataType distDifference;
} distanceCalculated;
//
typedef struct {
dataType pvl, qvl, hvl;
} xNbDistances;
typedef struct {
dataType pvl, qvl, hvl;
} yNbDistances;
typedef struct {
dataType pvl, qvl, hvl;
} zNbDistances;
typedef struct {
dataType xFwd, yFwd, zFwd;
} Finite_Differences;
//==============================================================================
typedef struct { size_t k, i, j; } CoordPoints;
//==============================================================================
CoordPoints transformPoint(CoordPoints * inputPoints, Point3D translation, Point3D scaling, Point3D rotation, dataType centroid[3], size_t imageHeight, size_t imageLength, size_t imageWidth, int loc);
dataType getDistance(dataType ** binaryImage, size_t imageHeight, size_t imageLength, size_t dim2D, const size_t k1, const size_t x1, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize);
size_t surfacePoints(dataType ** binaryImage, size_t imageLength, const unsigned char fgroundValue, ClipBox bestfitBox);
//==============================================================================
void nbPointsX(dataType ** transformedBinaryData, dataType pixelSize, size_t x, size_t k, size_t i, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType * pValue, dataType * qValue, dataType *hh, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize);
void nbPointsY(dataType ** transformedBinaryData, dataType pixelSize, size_t k, size_t i, size_t j, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType * pValue, dataType * qValue, dataType *hh, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize);
void nbPointsZ(dataType ** transformedBinaryData, dataType pixelSize, size_t x, size_t k, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType * pValue, dataType * qValue, dataType *hh, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize);
//==============================================================================
dataType finiteDifAll(dataType pValue, dataType qValue, dataType hh);
//==============================================================================
void run_registration(dataType **fixedData, dataType **movingData, dataType **resultPtr, size_t zDim, size_t xDim, size_t yDim, Registration_Params params, Optimization_Method gdescentMethod)
{
//==============================================================================
// Variable definition
dataType step_size = params.step_size;
dataType tol = params.tolerance;
//==============================================================================
dataType firstCpuTime = 0.0, secondCpuTime = 0.0;
//==============================================================================
// Centroid Parameters
dataType fixedCentroid[3], movingCentroid[3];
// Centroid Method Fixed/Destination Data
centroidImage(fixedData, fixedCentroid, zDim, xDim, yDim, params.imageBackground);
// Centroid Method Moving/Source Data
centroidImage(movingData, movingCentroid, zDim, xDim, yDim, params.imageBackground);
//==============================================================================
// Set the Translation approximation
Point3D translationTran;
translationTran.x = fixedCentroid[0] - movingCentroid[0];
translationTran.y = fixedCentroid[1] - movingCentroid[1];
translationTran.z = fixedCentroid[2] - movingCentroid[2];
//==============================================================================
// Sets Transformation Parameters to be used in Registration
Affine_Parameter finalResults;
Point3D rotationTran = { 0.0, 0.0, 0.0 };
Point3D scalingTran = { 1.0, 1.0, 1.0 };
finalResults.rotation = rotationTran, finalResults.scaling = scalingTran, finalResults.translation = translationTran;
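// The initial guess uses an identity rotation and scaling; only the translation is
// pre-estimated as the offset between the two centroids.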
//==============================================================================
// Call the registration Function
//==============================================================================
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (gdescentMethod == GRADIENT_DESCENT)
{
finalResults = registration3D(fixedData, movingData, finalResults, step_size, tol, zDim, xDim, yDim, movingCentroid, params);
}
else if (gdescentMethod == STOCHASTIC_DESCENT)
{
finalResults = registrationStochastic3D(fixedData, movingData, finalResults, step_size, tol, zDim, xDim, yDim, movingCentroid, params);
}
else if (gdescentMethod == BLOCK_COORDINATE_DESCENT)
{
finalResults = registrationCoorDinateDescent3D(fixedData, movingData, finalResults, step_size, tol, zDim, xDim, yDim, movingCentroid, params);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
printf("Total Registration CPU time: %e secs\n", secondCpuTime - firstCpuTime);
//==============================================================================
// Apply transformation results to destination - Expect same as source~approximately
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
transform3DImage(movingData, resultPtr, finalResults.translation, finalResults.scaling, finalResults.rotation, zDim, xDim, yDim, params.imageBackground, movingCentroid, params.parallelize);
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
//==============================================================================
#endif
#ifdef CONSOLE_OUTPUT
printf("Final Resulting Transformation CPU time: %e secs\n\n", secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Save the resultant transformation (note: params is passed by value, so this assignment is
// only visible inside this function)
params.affineResults = finalResults;
//==============================================================================
}
//==============================================================================
void fastMarching(dataType ** distancePtr, dataType ** dataSourcePtr, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType objPixel)
{
size_t k, i, j, x;
struct Node * band = NULL; // Holds all the Objects
// Sets the structure size, to hold all the calculated arrival times
Obj_Structure ** objectNthD = (Obj_Structure **)malloc(sizeof(Obj_Structure*)*imageHeight);
for (i = 0; i < imageHeight; i++)
{
objectNthD[i] = (Obj_Structure *)malloc(sizeof(Obj_Structure) * (imageLength*imageWidth));
}
// Initialize Object2D
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
size_t x = x_new(i, j, imageLength);
objectNthD[k][x].arrival = INFINITY;
objectNthD[k][x].state = UNKNOWN;
objectNthD[k][x].xpos = i;
objectNthD[k][x].ypos = j;
objectNthD[k][x].zpos = k;
objectNthD[k][x].position = x_flat(i, j, k, imageLength, imageWidth);
}
}
}
Point3D *shapePoints = (Point3D *)malloc(sizeof(Point3D)*(imageHeight*imageLength*imageWidth));
int loop = 0;
// Derive the points
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 1D representation
x = x_new(i, j, imageLength);
if (dataSourcePtr[k][x] == objPixel) // Fill value for block
{
// Save the dimension with those values
shapePoints[loop].x = (dataType)i;
shapePoints[loop].y = (dataType)j;
shapePoints[loop].z = (dataType)k;
loop++;
}
}
}
}
// Arrival times
Arrival_Time *shapeArrival = (Arrival_Time *)malloc(sizeof(Arrival_Time)*loop);
for (i = 0; i < loop; i++)
{
shapeArrival[i].T = 0.0;
}
// Calls Fm3D
fastMarching3D(band, objectNthD, shapePoints, shapeArrival, imageHeight, imageLength, imageWidth, loop);
// Copy Fast marching modified to distancePtr
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < (imageLength*imageWidth); i++)
{
distancePtr[k][i] = objectNthD[k][i].arrival;
}
}
// Release the allocated memory
free(shapePoints);
shapePoints = NULL;
free(shapeArrival);
shapeArrival = NULL;
for (i = 0; i < imageHeight; i++)
{
free(objectNthD[i]);
}
free(objectNthD);
//deleteList(band);
}
//==============================================================================
// Computes the centroid of a 3D image as the mean of its foreground voxel coordinates
void centroidImage(dataType ** imageDataPtr, dataType *centroid, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType imageBackground)
{
size_t k, i, j, counts = 0;
dataType x = 0.0, y = 0.0, z = 0.0;
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
size_t xD = x_new(i, j, imageLength);
if (imageDataPtr[k][xD] != imageBackground)
{
x += i;
y += j;
z += k;
counts++;
}
}
}
}
// Guard against division by zero in case no foreground voxel was found - not a shape
if (counts == 0)
{
counts = 1;
}
// Set the Centers of the shape
centroid[0] = x / counts, centroid[1] = y / counts, centroid[2] = z / counts;
}
//==============================================================================
void centroidClipBox(dataType *centroid, ClipBox coord, dataType ** imageDataPtr, size_t imageLength, dataType imageBackground)
{
size_t k, i, j, counts = 0;
dataType x = 0.0, y = 0.0, z = 0.0;
for (k = coord.k_min; k <= coord.k_max; k++)
{
for (i = coord.i_min; i <= coord.i_max; i++)
{
for (j = coord.j_min; j <= coord.j_max; j++)
{
// 2D to 1D representation for i, j
size_t xD = x_new(i, j, imageLength);
if (imageDataPtr[k][xD] != imageBackground)
{
x += i;
y += j;
z += k;
counts++;
}
}
}
}
// Guard against division by zero in case no foreground voxel was found - not a shape
if (counts == 0)
{
counts = 1;
}
// Set the Centers of the shape
centroid[0] = x / counts, centroid[1] = y / counts, centroid[2] = z / counts;
}
//==============================================================================
inline int NFunctionBinary(dataType v1, dataType v2, dataType delta)
{
//==============================================================================
if (abs((int)v1) == delta || abs((int)v2) == delta)
{
return 1;
}
else
{
return 0;
}
//==============================================================================
}
//==============================================================================
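// Narrow band indicator: returns 1 if the smaller of the two absolute distance values lies
// within the band width delta, and 0 otherwise.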
int NFunction(dataType val1, dataType val2, dataType delta)
{
if (fabs(val1) < fabs(val2))
{
if (fabs(val1) > delta)
{
return 0;
}
else
{
return 1;
}
}
else
{
if (fabs(val2) > delta)
{
return 0;
}
else
{
return 1;
}
}
}
inline int NFunctionOne(dataType v1, dataType delta)
{
#ifdef USE_NARROWBAND
if (abs((int)v1) > delta)
{
return 0;
}
else
{
return 1;
}
#else
return 1;
#endif // USE_NARROWBAND
}
//==============================================================================
dataType energyFunction(dataType ** destination, dataType ** distTrans, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType h)
{
size_t k, i, j, counter = 0;
dataType energy = 0.0;
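// Energy: E = (h / (2 * N)) * sum over the narrow band of (destination - distTrans)^2,
// where N is the number of grid points inside the band.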
// Calculate the error in the grid functions
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
size_t x = x_new(i, j, imageLength);
if (NFunction(destination[k][x], distTrans[k][x], NDelta) == 1)
{
energy += (destination[k][x] - distTrans[k][x]) * (destination[k][x] - distTrans[k][x]);
counter++;
}
}
}
}
// Guard against division by zero in case the narrow band is empty
if (counter == 0)
{
counter = 1;
}
return ((h * energy) / (2 * counter));
}
//==============================================================================
dataType energyFunctionClip(dataType ** destination, dataType **distTrans, ClipBox coord, size_t imageLength)
{
size_t k, i, j, counter = 0, clipPts = 0;
dataType energy = 0.0;
//==============================================================================
// Energy calculation within the clipped box and inside the narrow band
for (k = coord.k_min; k <= coord.k_max; k++)
{
for (i = coord.i_min; i <= coord.i_max; i++)
{
for (j = coord.j_min; j <= coord.j_max; j++)
{
// 2D to 1D representation for i, j
size_t x = x_new(i, j, imageLength);
if (NFunction(destination[k][x], distTrans[k][x], NDelta) == 1)
{
energy += (destination[k][x] - distTrans[k][x]) * (destination[k][x] - distTrans[k][x]);
counter++; // Points within the narrow band
}
clipPts++; // Count points within the clipbox
}
}
}
//==============================================================================
bandPts = counter;
clipBoxPts = clipPts;
// Guard against division by zero in case the narrow band is empty
if (counter == 0)
{
counter = 1;
}
return (energy / (2 * counter));
}
//==============================================================================
dataType energyFunctionClipBandArea(dataType ** destination, dataType ** distTrans, ClipBox coord, size_t imageLength, dataType ** fixedNBandPtr, dataType ** movingNBandPtr, dataType imageForeground)
{
size_t k, i, j, counter = 0, clipPts = 0;
dataType energy = 0.0;
//==============================================================================
// Energy calculation within the clipped box and inside the narrow band
for (k = coord.k_min; k <= coord.k_max; k++)
{
for (i = coord.i_min; i <= coord.i_max; i++)
{
for (j = coord.j_min; j <= coord.j_max; j++)
{
// 2D to 1D representation for i, j
size_t x = x_new(i, j, imageLength);
if (NFunctionBinary(fixedNBandPtr[k][x], movingNBandPtr[k][x], imageForeground) == 1)
{
energy += (destination[k][x] - distTrans[k][x]) * (destination[k][x] - distTrans[k][x]);
counter++; // Points within the narrow band
}
clipPts++; // Count points within the clipbox
}
}
}
//==============================================================================
bandPts = counter;
clipBoxPts = clipPts;
// Guard against division by zero in case the narrow band is empty
if (counter == 0)
{
counter = 1;
}
return (energy / (2 * counter));
}
//==============================================================================
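// Finite differences in the x-direction: forward (f[x+1]-f[x])/h at the left boundary,
// backward (f[x]-f[x-1])/h at the right boundary and central (f[x+1]-f[x-1])/(2h) in the
// interior. finiteDifY and finiteDifZ below follow the same scheme along y and z.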
dataType finiteDifX(dataType ** distPtr, dataType h, size_t x, size_t k, size_t i, size_t imageLength)
{
if (i == 0) // Apply Forward Difference
{
return (distPtr[k][x + 1] - distPtr[k][x]) / (h);
}
else if (i >= imageLength - 1) // Apply Backward Difference
{
return (distPtr[k][x] - distPtr[k][x - 1]) / (h);
}
else // Apply Central Difference
{
return (distPtr[k][x + 1] - distPtr[k][x - 1]) / (2 * h);
}
}
//==============================================================================
dataType finiteDifY(dataType ** distPtr, dataType h, size_t k, size_t i, size_t j, size_t imageLength, size_t imageWidth)
{
size_t x_n, x_p;
if (j == 0) // Apply Forward Difference
{
// 2D to 1D representation for i, j
x_n = x_new(i, j + 1, imageLength);
x_p = x_new(i, j, imageLength);
return (distPtr[k][x_n] - distPtr[k][x_p]) / (h);
}
else if (j >= imageWidth - 1) // Apply Backward Difference
{
// 2D to 1D representation for i, j
x_n = x_new(i, j, imageLength);
x_p = x_new(i, j - 1, imageLength);
return (distPtr[k][x_n] - distPtr[k][x_p]) / (h);
}
else // Apply Central Difference
{
// 2D to 1D representation for i, j
x_n = x_new(i, j + 1, imageLength);
x_p = x_new(i, j - 1, imageLength);
return (distPtr[k][x_n] - distPtr[k][x_p]) / (2 * h);
}
}
//==============================================================================
dataType finiteDifZ(dataType ** distPtr, dataType h, size_t x, size_t k, size_t i, size_t imageLength, size_t imageHeight)
{
if (k == 0) // Apply Forward Difference
{
return (distPtr[k + 1][x] - distPtr[k][x]) / (h);
}
else if (k >= imageHeight - 1) // Apply Backward Difference
{
return (distPtr[k][x] - distPtr[k - 1][x]) / (h);
}
else // Apply Central Difference
{
return (distPtr[k + 1][x] - distPtr[k - 1][x]) / (2 * h);
}
}
//==============================================================================
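// nbPointsX/Y/Z fetch the two neighbouring distance values (forward, backward or central,
// depending on the boundary) together with the corresponding grid spacing hh, so that
// finiteDifAll can form the difference quotient (pValue - qValue) / hh.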
void nbPointsX(dataType ** transformedBinaryData, dataType pixelSize, size_t x, size_t k, size_t i, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType * pValue, dataType * qValue, dataType *hh, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize)
{
////==============================================================================
size_t dim2D = imageLength * imageWidth;
//==============================================================================
// neighbourPoints nb;
dataType pv, qv;
//==============================================================================
if (i == 0) // Beginning - forward difference
{
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x + 1, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 1;
}
else if (i >= imageLength - 1) // End
{
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x - 1, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 1;
}
else // Central values
{
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x + 1, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x - 1, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
//*qValue = distanceResultsPtr[k][x - 1];
*hh = pixelSize * 2;
}
// *pValue = nb.pv; *qValue = nb.qv;
*pValue = pv; *qValue = qv;
}
//==============================================================================
void nbPointsY(dataType ** transformedBinaryData, dataType pixelSize, size_t k, size_t i, size_t j, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType * pValue, dataType * qValue, dataType *hh, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize)
{
////==============================================================================
size_t dim2D = imageLength * imageWidth;
//==============================================================================
// neighbourPoints nb;
dataType pv, qv;
//==============================================================================
size_t x_n, x_p;
if (j == 0) // Apply Forward Difference
{
// 2D to 1D representation for i, j
x_n = x_new(i, j + 1, imageLength);
x_p = x_new(i, j, imageLength);
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x_n, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x_p, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 1;
}
else if (j >= imageWidth - 1) // Apply Backward Difference
{
// 2D to 1D representation for i, j
x_n = x_new(i, j, imageLength);
x_p = x_new(i, j - 1, imageLength);
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x_n, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x_p, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 1;
}
else // Apply Central Difference
{
// 2D to 1D representation for i, j
x_n = x_new(i, j + 1, imageLength);
x_p = x_new(i, j - 1, imageLength);
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x_n, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x_p, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 2;
}
// *pValue = nb.pv; *qValue = nb.qv;
*pValue = pv; *qValue = qv;
}
//==============================================================================
void nbPointsZ(dataType ** transformedBinaryData, dataType pixelSize, size_t x, size_t k, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType * pValue, dataType * qValue, dataType *hh, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize)
{
////==============================================================================
size_t dim2D = imageLength * imageWidth;
//==============================================================================
// neighbourPoints nb;
dataType pv, qv;
//==============================================================================
if (k == 0) // Apply Forward Difference
{
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k + 1, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 1;
//==============================================================================
}
else if (k >= imageHeight - 1) // Apply Backward Difference
{
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k - 1, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
//*qValue = distanceResultsPtr[k - 1][x];
*hh = pixelSize * 1;
//==============================================================================
}
else // Apply Central Difference
{
pv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k + 1, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
qv = getDistance(transformedBinaryData, imageHeight, imageLength, dim2D, k - 1, x, bestfitBox, surface_points, ptsNum, insideShapevalue, parallelize);
*hh = pixelSize * 2;
//==============================================================================
}
// *pValue = nb.pv; *qValue = nb.qv;
*pValue = pv; *qValue = qv;
}
//==============================================================================
dataType finiteDifAll(dataType pValue, dataType qValue, dataType hh)
{
return (pValue - qValue) / hh;
}
//==============================================================================
Affine_Parameter gradientComponents(dataType ** destPtr, dataType ** distTrans, dataType h, Affine_Parameter * params, size_t imageHeight, size_t imageLength, size_t imageWidth)
{
Affine_Parameter results;
// Initialize the parameters
size_t k, i, j, x, counter = 0;
// Initialize the results
results.rotation.x = 0.0, results.rotation.y = 0.0, results.rotation.z = 0.0;
results.scaling.x = 0.0, results.scaling.y = 0.0, results.scaling.z = 0.0;
results.translation.x = 0.0, results.translation.y = 0.0, results.translation.z = 0.0;
// Forward difference parameters
dataType xFwd, yFwd, zFwd;
// Derivative component
dataType componentX, componentY, componentZ;
// Stores the difference between two distance pointers
dataType distDifference;
// Shorter Transformation names
dataType phi = params->rotation.x, theta = params->rotation.y, psi = params->rotation.z;
dataType sx = params->scaling.x, sy = params->scaling.y, sz = params->scaling.z;
dataType tx = params->translation.x, ty = params->translation.y, tz = params->translation.z;
// Trigonometric functions as const
// Rotation
const double neg_sin_phi_sin_psi = -sin(phi)*sin(psi), _cos_phi_cos_psi_sin_theta = cos(phi)*cos(psi)*sin(theta), neg_cos_psi_sin_phi = -cos(psi)*sin(phi), _cos_phi_sin_psi_sin_theta = cos(phi)*sin(psi)*sin(theta), _cos_phi_cos_theta = cos(phi)*cos(theta);
const double _cos_phi_sin_psi = cos(phi)*sin(psi), neg_cos_phi_sin_psi = -cos(phi)*sin(psi), _cos_psi_sin_phi_sin_theta = cos(psi)*sin(phi)*sin(theta), _cos_phi_cos_psi = cos(phi)*cos(psi), _sin_phi_sin_psi_sin_theta = sin(phi)*sin(psi)*sin(theta), _cos_theta_sin_phi = cos(theta) * sin(phi);
const double _cos_psi_sin_theta = cos(psi)*sin(theta), _sin_psi_sin_theta = sin(psi)*sin(theta), _cos_theta = cos(theta), _cos_psi_cos_theta_sin_phi = cos(psi)*cos(theta)*sin(phi), _cos_theta_sin_phi_sin_psi = cos(theta)*sin(phi)*sin(psi), _sin_phi_sin_theta = sin(phi)*sin(theta);
const double _cos_phi_cos_psi_cos_theta = cos(phi)*cos(psi)*cos(theta), _cos_phi_cos_theta_sin_psi_ = cos(phi)*cos(theta)*sin(psi), _cos_phi_sin_theta = cos(phi)*sin(theta);
// Scaling
const double _cos_psi_cos_theta = cos(psi)*cos(theta), _cos_theta_sin_psi = cos(theta)*sin(psi), _sin_theta = sin(theta);
const double _sin_phi_sin_psi = sin(phi)*sin(psi);
const double _cos_psi_sin_phi = cos(psi)*sin(phi);
// Scales
const dataType _sx_sx = sx * sx, _sy_sy = sy * sy, _sz_sz = sz * sz;
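// Gradient accumulation via the chain rule: each band voxel contributes
// 2 * (destPtr - distTrans) times the derivative of the transformed distance map with respect
// to the respective affine parameter, approximated through the finite differences of
// distTrans computed below.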
// Begin Evaluation
for (k = 0; k < imageHeight; k++)
{
for (j = 0; j < imageWidth; j++)
{
x = x_new(0, j, imageLength);
for (i = 0; i < imageLength; i++)
{
// 2D to 1D representation for i, j
/*x = x_new(i, j, imageLength);*/
if (NFunction(destPtr[k][x], distTrans[k][x], NDelta) == 1)
{
counter++;
// Store the distance function difference
distDifference = (dataType)((destPtr[k][x] - distTrans[k][x]) * 2.0);
// Directional component vector derivatives - i, j, k
dataType tmpI = i / (dataType)imageLength, tmpJ = j / (dataType)imageWidth, tmpK = k / (dataType)imageHeight;
// Apply Forward Differences to the distTrans pointer
xFwd = finiteDifX(distTrans, h, x, k, i, imageLength);
yFwd = finiteDifY(distTrans, h, k, i, j, imageLength, imageWidth);
zFwd = finiteDifZ(distTrans, h, x, k, i, imageLength, imageHeight);
// Evaluate Individual Gradient Components
#ifdef DIRECTIONAL
// Rotation Components - Directionnal
componentX = (dataType)(yFwd * (((tmpI)*((neg_sin_phi_sin_psi + _cos_phi_cos_psi_sin_theta) / sy)) + ((tmpJ)*((neg_cos_psi_sin_phi - _cos_phi_sin_psi_sin_theta) / sy)) + ((-tmpK)*((_cos_phi_cos_theta) / sy))) +
zFwd * (((tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_sin_theta) / sz)) + ((tmpJ)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / sz)) + ((-tmpK)*((_cos_theta_sin_phi) / sz))));
results.rotation.x += (componentX)*(distDifference);
componentY = (dataType)(xFwd * (((-tmpI)*((_cos_psi_sin_theta) / sx)) + ((tmpJ)*((_sin_psi_sin_theta) / sx)) + ((tmpK)*((_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_psi_cos_theta_sin_phi) / sy)) + ((-tmpJ)*((_cos_theta_sin_phi_sin_psi) / sy)) + ((tmpK)*((_sin_phi_sin_theta) / sy))) +
zFwd * (((-tmpI)*((_cos_phi_cos_psi_cos_theta) / sz)) + ((tmpJ)*((_cos_phi_cos_theta_sin_psi_) / sz)) + ((-tmpK)*(_cos_phi_sin_theta / sz))));
results.rotation.y += (componentY)*(distDifference);
componentZ = (dataType)(xFwd * (((-tmpI)*((_cos_theta_sin_psi) / sx)) + ((-tmpJ)*((_cos_psi_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / sy)) + ((tmpJ)*((neg_cos_phi_sin_psi - _cos_psi_sin_phi_sin_theta) / sy))) +
zFwd * (((tmpI)*((_cos_psi_sin_phi + _cos_phi_sin_psi_sin_theta) / sz)) + ((tmpJ)*((neg_sin_phi_sin_psi + _cos_phi_cos_psi_sin_theta) / sz))));
results.rotation.z += (componentZ)*(distDifference);
// Directional Scale Components
componentX = (dataType)(xFwd * ((-tmpI)*((_cos_psi_cos_theta) / (_sx_sx)) + ((tmpJ)*((_cos_theta_sin_psi) / (_sx_sx))) + ((-tmpK)*((_sin_theta) / (_sx_sx)))));
results.scaling.x += (componentX)*(distDifference);
componentY = (dataType)(yFwd * (((-tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_sin_theta) / (_sy_sy))) + ((-tmpJ)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / (_sy_sy))) + ((tmpK)*((_cos_theta_sin_phi) / (_sy_sy)))));
results.scaling.y += (componentY)*(distDifference);
componentZ = (dataType)(zFwd * (((-tmpI)*((_sin_phi_sin_psi - _cos_phi_cos_psi_sin_theta) / (_sz_sz))) + ((-tmpJ)*((_cos_psi_sin_phi + _cos_phi_sin_psi_sin_theta) / (_sz_sz))) + ((-tmpK)*((_cos_phi_cos_theta) / (_sz_sz)))));
results.scaling.z += (componentZ)*(distDifference);
#endif // DIRECTIONAL
// Translation Parameters - Always directional
// Tx
results.translation.x += (-xFwd)*(distDifference);
// Ty
results.translation.y += (-yFwd)*(distDifference);
// Tz
results.translation.z += (-zFwd)*(distDifference);
}
x++;
}
}
}
// Normalize results
if (counter == 0)
{
counter = 1;
}
// NORMALIZATION
results.scaling.x = results.scaling.x / counter;
results.scaling.y = results.scaling.y / counter;
results.scaling.z = results.scaling.z / counter;
results.rotation.x = results.rotation.x / counter;
results.rotation.y = results.rotation.y / counter;
results.rotation.z = results.rotation.z / counter;
results.translation.x = results.translation.x / counter;
// Ty
results.translation.y = results.translation.y / counter;
// Tz
results.translation.z = results.translation.z / counter;
return results;
}
//==============================================================================
Affine_Parameter gradientCoorDinateDescentComp(dataType ** destPtr, dataType ** distTrans, dataType h, Affine_Parameter * params, size_t imageHeight, size_t imageLength, size_t imageWidth, size_t updateComponent)
{
Affine_Parameter results;
// Initialize the parameters
size_t k, i, j, x, counter = 0;
// Initialize the results
results.rotation.x = 0.0, results.rotation.y = 0.0, results.rotation.z = 0.0;
results.scaling.x = 0.0, results.scaling.y = 0.0, results.scaling.z = 0.0;
results.translation.x = 0.0, results.translation.y = 0.0, results.translation.z = 0.0;
// Forward difference parameters
dataType xFwd, yFwd, zFwd;
// Derivative component
dataType componentX, componentY, componentZ;
// Stores the difference between two distance pointers
dataType distDifference;
// Shorter Transformation names
dataType phi = params->rotation.x, theta = params->rotation.y, psi = params->rotation.z;
dataType sx = params->scaling.x, sy = params->scaling.y, sz = params->scaling.z;
dataType tx = params->translation.x, ty = params->translation.y, tz = params->translation.z;
// Trigonometric functions as const
// Rotation
const double neg_sin_phi_sin_psi = -sin(phi)*sin(psi), _cos_phi_cos_psi_sin_theta = cos(phi)*cos(psi)*sin(theta), neg_cos_psi_sin_phi = -cos(psi)*sin(phi), _cos_phi_sin_psi_sin_theta = cos(phi)*sin(psi)*sin(theta), _cos_phi_cos_theta = cos(phi)*cos(theta);
const double _cos_phi_sin_psi = cos(phi)*sin(psi), neg_cos_phi_sin_psi = -cos(phi)*sin(psi), _cos_psi_sin_phi_sin_theta = cos(psi)*sin(phi)*sin(theta), _cos_phi_cos_psi = cos(phi)*cos(psi), _sin_phi_sin_psi_sin_theta = sin(phi)*sin(psi)*sin(theta), _cos_theta_sin_phi = cos(theta) * sin(phi);
const double _cos_psi_sin_theta = cos(psi)*sin(theta), _sin_psi_sin_theta = sin(psi)*sin(theta), _cos_theta = cos(theta), _cos_psi_cos_theta_sin_phi = cos(psi)*cos(theta)*sin(phi), _cos_theta_sin_phi_sin_psi = cos(theta)*sin(phi)*sin(psi), _sin_phi_sin_theta = sin(phi)*sin(theta);
const double _cos_phi_cos_psi_cos_theta = cos(phi)*cos(psi)*cos(theta), _cos_phi_cos_theta_sin_psi_ = cos(phi)*cos(theta)*sin(psi), _cos_phi_sin_theta = cos(phi)*sin(theta);
// Scaling
const double _cos_psi_cos_theta = cos(psi)*cos(theta), _cos_theta_sin_psi = cos(theta)*sin(psi), _sin_theta = sin(theta);
const double _sin_phi_sin_psi = sin(phi)*sin(psi);
const double _cos_psi_sin_phi = cos(psi)*sin(phi);
// Scales
const dataType _sx_sx = sx * sx, _sy_sy = sy * sy, _sz_sz = sz * sz;
// Begin Evaluation
for (k = 0; k < imageHeight; k++)
{
for (j = 0; j < imageWidth; j++)
{
x = x_new(0, j, imageLength);
for (i = 0; i < imageLength; i++)
{
// 2D to 1D representation for i, j
/*x = x_new(i, j, imageLength);*/
if (NFunction(destPtr[k][x], distTrans[k][x], NDelta) == 1)
{
counter++;
// Store the distance function difference
distDifference = (dataType)((destPtr[k][x] - distTrans[k][x]) * 2.0);
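// The factor of 2.0 matches the derivative of the squared difference term in the energy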
// Directional component vector derivatives - i, j, k
dataType tmpI = i / (dataType)imageLength, tmpJ = j / (dataType)imageWidth, tmpK = k / (dataType)imageHeight;
// Trigonometry functions inside the component evaluation equation
//T a = cos(phi), b = sin(phi), ab = cos(phi)*sin(phi), aa = cos(phi)*cos(phi), bb = sin(phi)*sin(phi), aaa = a * aa, bbb = b * bb, aab = aa * b, abb = a * bb;
//==============================================================================
// Apply Forward Differences to the distTrans pointer
xFwd = finiteDifX(distTrans, h, x, k, i, imageLength);
yFwd = finiteDifY(distTrans, h, k, i, j, imageLength, imageWidth);
zFwd = finiteDifZ(distTrans, h, x, k, i, imageLength, imageHeight);
//==============================================================================
#ifdef DIRECTIONAL
// Begin update for the component passed in
//==============================================================================
if (updateComponent == 1) // Rotation Component
{
componentX = (dataType)(yFwd * (((tmpI)*((neg_sin_phi_sin_psi + _cos_phi_cos_psi_sin_theta) / sy)) + ((tmpJ)*((neg_cos_psi_sin_phi - _cos_phi_sin_psi_sin_theta) / sy)) + ((-tmpK)*((_cos_phi_cos_theta) / sy))) +
zFwd * (((tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_sin_theta) / sz)) + ((tmpJ)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / sz)) + ((-tmpK)*((_cos_theta_sin_phi) / sz))));
results.rotation.x += (componentX)*(distDifference);
componentY = (dataType)(xFwd * (((-tmpI)*((_cos_psi_sin_theta) / sx)) + ((tmpJ)*((_sin_psi_sin_theta) / sx)) + ((tmpK)*((_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_psi_cos_theta_sin_phi) / sy)) + ((-tmpJ)*((_cos_theta_sin_phi_sin_psi) / sy)) + ((tmpK)*((_sin_phi_sin_theta) / sy))) +
zFwd * (((-tmpI)*((_cos_phi_cos_psi_cos_theta) / sz)) + ((tmpJ)*((_cos_phi_cos_theta_sin_psi_) / sz)) + ((-tmpK)*(_cos_phi_sin_theta / sz))));
results.rotation.y += (componentY)*(distDifference);
componentZ = (dataType)(xFwd * (((-tmpI)*((_cos_theta_sin_psi) / sx)) + ((-tmpJ)*((_cos_psi_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / sy)) + ((tmpJ)*((neg_cos_phi_sin_psi - _cos_psi_sin_phi_sin_theta) / sy))) +
zFwd * (((tmpI)*((_cos_psi_sin_phi + _cos_phi_sin_psi_sin_theta) / sz)) + ((tmpJ)*((neg_sin_phi_sin_psi + _cos_phi_cos_psi_sin_theta) / sz))));
results.rotation.z += (componentZ)*(distDifference);
}
else if (updateComponent == 2) // Scaling Component
{
componentX = (dataType)(xFwd * ((-tmpI)*((_cos_psi_cos_theta) / (_sx_sx)) + ((tmpJ)*((_cos_theta_sin_psi) / (_sx_sx))) + ((-tmpK)*((_sin_theta) / (_sx_sx)))));
results.scaling.x += (componentX)*(distDifference);
componentY = (dataType)(yFwd * (((-tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_sin_theta) / (_sy_sy))) + ((-tmpJ)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / (_sy_sy))) + ((tmpK)*((_cos_theta_sin_phi) / (_sy_sy)))));
results.scaling.y += (componentY)*(distDifference);
componentZ = (dataType)(zFwd * (((-tmpI)*((_sin_phi_sin_psi - _cos_phi_cos_psi_sin_theta) / (_sz_sz))) + ((-tmpJ)*((_cos_psi_sin_phi + _cos_phi_sin_psi_sin_theta) / (_sz_sz))) + ((-tmpK)*((_cos_phi_cos_theta) / (_sz_sz)))));
results.scaling.z += (componentZ)*(distDifference);
}
else if (updateComponent == 3) // Translation Component
{
// Tx
results.translation.x += (-xFwd)*(distDifference);
// Ty
results.translation.y += (-yFwd)*(distDifference);
// Tz
results.translation.z += (-zFwd)*(distDifference);
}
//==============================================================================
#endif // DIRECTIONAL
}
x++;
}
}
}
// Normalize results
if (counter == 0)
{
counter = 1;
}
// NORMALIZATION
if (updateComponent == 1) // Rotation
{
results.rotation.x = results.rotation.x / counter;
results.rotation.y = results.rotation.y / counter;
results.rotation.z = results.rotation.z / counter;
}
else if (updateComponent == 2) // Scaling
{
results.scaling.x = results.scaling.x / counter;
results.scaling.y = results.scaling.y / counter;
results.scaling.z = results.scaling.z / counter;
}
else if (updateComponent == 3) // Translation
{
// Tx
results.translation.x = results.translation.x / counter;
// Ty
results.translation.y = results.translation.y / counter;
// Tz
results.translation.z = results.translation.z / counter;
}
return results;
}
//==============================================================================
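// gradientComponentsClip is the clip-box variant of the full gradient
// evaluation: it accumulates the same rotation, scaling and translation
// components, but iterates only over the bestFit bounding box rather than
// the whole volume, skipping voxels that lie far outside both shapes.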
Affine_Parameter gradientComponentsClip(dataType ** destPtr, dataType ** distTrans, dataType h, Affine_Parameter * params, size_t imageHeight, size_t imageLength, size_t imageWidth, ClipBox bestFit)
{
Affine_Parameter results;
// Initialize the parameters
size_t k, i, j, x, counter = 0;
// Initialize the results
results.rotation.x = 0.0, results.rotation.y = 0.0, results.rotation.z = 0.0;
results.scaling.x = 0.0, results.scaling.y = 0.0, results.scaling.z = 0.0;
results.translation.x = 0.0, results.translation.y = 0.0, results.translation.z = 0.0;
// Forward difference parameters
dataType xFwd, yFwd, zFwd;
// Derivative component
dataType componentX, componentY, componentZ;
// Stores the difference between two distance pointers
dataType distDifference;
// Shorter Transformation names
dataType phi = params->rotation.x, theta = params->rotation.y, psi = params->rotation.z;
dataType sx = params->scaling.x, sy = params->scaling.y, sz = params->scaling.z;
dataType tx = params->translation.x, ty = params->translation.y, tz = params->translation.z;
// Trigonometric functions as const
// Rotation
const double neg_sin_phi_sin_psi = -sin(phi)*sin(psi), _cos_phi_cos_psi_sin_theta = cos(phi)*cos(psi)*sin(theta), neg_cos_psi_sin_phi = -cos(psi)*sin(phi), _cos_phi_sin_psi_sin_theta = cos(phi)*sin(psi)*sin(theta), _cos_phi_cos_theta = cos(phi)*cos(theta);
const double _cos_phi_sin_psi = cos(phi)*sin(psi), neg_cos_phi_sin_psi = -cos(phi)*sin(psi), _cos_psi_sin_phi_sin_theta = cos(psi)*sin(phi)*sin(theta), _cos_phi_cos_psi = cos(phi)*cos(psi), _sin_phi_sin_psi_sin_theta = sin(phi)*sin(psi)*sin(theta), _cos_theta_sin_phi = cos(theta) * sin(phi);
const double _cos_psi_sin_theta = cos(psi)*sin(theta), _sin_psi_sin_theta = sin(psi)*sin(theta), _cos_theta = cos(theta), _cos_psi_cos_theta_sin_phi = cos(psi)*cos(theta)*sin(phi), _cos_theta_sin_phi_sin_psi = cos(theta)*sin(phi)*sin(psi), _sin_phi_sin_theta = sin(phi)*sin(theta);
const double _cos_phi_cos_psi_cos_theta = cos(phi)*cos(psi)*cos(theta), _cos_phi_cos_theta_sin_psi_ = cos(phi)*cos(theta)*sin(psi), _cos_phi_sin_theta = cos(phi)*sin(theta);
// Scaling
const double _cos_psi_cos_theta = cos(psi)*cos(theta), _cos_theta_sin_psi = cos(theta)*sin(psi), _sin_theta = sin(theta);
const double _sin_phi_sin_psi = sin(phi)*sin(psi);
const double _cos_psi_sin_phi = cos(psi)*sin(phi);
// Scales
const dataType _sx_sx = sx * sx, _sy_sy = sy * sy, _sz_sz = sz * sz;
// Begin Evaluation
for (k = bestFit.k_min; k < bestFit.k_max + 1; k++)
{
for (j = bestFit.j_min; j < bestFit.j_max + 1; j++)
{
// 2D to 1D representation for i, j
x = x_new(0, j, imageLength);
for (i = bestFit.i_min; i < bestFit.i_max + 1; i++)
{
// 2D to 1D representation for i, j
//x = x_new(i, j, imageLength);
if (NFunction(destPtr[k][x], distTrans[k][x], NDelta) == 1)
{
counter++;
// Store the distance function difference
distDifference = (dataType)((destPtr[k][x] - distTrans[k][x]) * 2.0);
// Directional component vector derivatives - i, j, k
dataType tmpI = i / (dataType)imageLength, tmpJ = j / (dataType)imageWidth, tmpK = k / (dataType)imageHeight;
// Apply Forward Differences to the distTrans pointer
xFwd = finiteDifX(distTrans, h, x, k, i, imageLength);
yFwd = finiteDifY(distTrans, h, k, i, j, imageLength, imageWidth);
zFwd = finiteDifZ(distTrans, h, x, k, i, imageLength, imageHeight);
// Evaluate Individual Gradient Components
#ifdef DIRECTIONAL
// Rotation Components - Directional
componentX = (dataType)(yFwd * (((tmpI)*((neg_sin_phi_sin_psi + _cos_phi_cos_psi_sin_theta) / sy)) + ((tmpJ)*((neg_cos_psi_sin_phi - _cos_phi_sin_psi_sin_theta) / sy)) + ((-tmpK)*((_cos_phi_cos_theta) / sy))) +
zFwd * (((tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_sin_theta) / sz)) + ((tmpJ)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / sz)) + ((-tmpK)*((_cos_theta_sin_phi) / sz))));
results.rotation.x += (componentX)*(distDifference);
componentY = (dataType)(xFwd * (((-tmpI)*((_cos_psi_sin_theta) / sx)) + ((tmpJ)*((_sin_psi_sin_theta) / sx)) + ((tmpK)*((_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_psi_cos_theta_sin_phi) / sy)) + ((-tmpJ)*((_cos_theta_sin_phi_sin_psi) / sy)) + ((tmpK)*((_sin_phi_sin_theta) / sy))) +
zFwd * (((-tmpI)*((_cos_phi_cos_psi_cos_theta) / sz)) + ((tmpJ)*((_cos_phi_cos_theta_sin_psi_) / sz)) + ((-tmpK)*(_cos_phi_sin_theta / sz))));
results.rotation.y += (componentY)*(distDifference);
componentZ = (dataType)(xFwd * (((-tmpI)*((_cos_theta_sin_psi) / sx)) + ((-tmpJ)*((_cos_psi_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / sy)) + ((tmpJ)*((neg_cos_phi_sin_psi - _cos_psi_sin_phi_sin_theta) / sy))) +
zFwd * (((tmpI)*((_cos_psi_sin_phi + _cos_phi_sin_psi_sin_theta) / sz)) + ((tmpJ)*((neg_sin_phi_sin_psi + _cos_phi_cos_psi_sin_theta) / sz))));
results.rotation.z += (componentZ)*(distDifference);
// Directional Scale Components
componentX = (dataType)(xFwd * ((-tmpI)*((_cos_psi_cos_theta) / (_sx_sx)) + ((tmpJ)*((_cos_theta_sin_psi) / (_sx_sx))) + ((-tmpK)*((_sin_theta) / (_sx_sx)))));
results.scaling.x += (componentX)*(distDifference);
componentY = (dataType)(yFwd * (((-tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_sin_theta) / (_sy_sy))) + ((-tmpJ)*((_cos_phi_cos_psi - _sin_phi_sin_psi_sin_theta) / (_sy_sy))) + ((tmpK)*((_cos_theta_sin_phi) / (_sy_sy)))));
results.scaling.y += (componentY)*(distDifference);
componentZ = (dataType)(zFwd * (((-tmpI)*((_sin_phi_sin_psi - _cos_phi_cos_psi_sin_theta) / (_sz_sz))) + ((-tmpJ)*((_cos_psi_sin_phi + _cos_phi_sin_psi_sin_theta) / (_sz_sz))) + ((-tmpK)*((_cos_phi_cos_theta) / (_sz_sz)))));
results.scaling.z += (componentZ)*(distDifference);
#endif // DIRECTIONAL
// Translation Parameters - Always directional
// Tx
results.translation.x += (-xFwd)*(distDifference);
// Ty
results.translation.y += (-yFwd)*(distDifference);
// Tz
results.translation.z += (-zFwd)*(distDifference);
}
x++;
}
}
}
// Normalize results
if (counter == 0)
{
counter = 1;
}
// NORMALIZATION
results.scaling.x = results.scaling.x / counter;
results.scaling.y = results.scaling.y / counter;
results.scaling.z = results.scaling.z / counter;
results.rotation.x = results.rotation.x / counter;
results.rotation.y = results.rotation.y / counter;
results.rotation.z = results.rotation.z / counter;
results.translation.x = results.translation.x / counter;
// Ty
results.translation.y = results.translation.y / counter;
// Tz
results.translation.z = results.translation.z / counter;
return results;
}
//==============================================================================
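// registration3D runs gradient-descent shape registration: it computes the
// fixed-image distance map once, then repeatedly (i) transforms the moving
// image with the current affine parameters, (ii) recomputes its distance map
// (optionally restricted to a clip box), (iii) evaluates the energy, and
// (iv) takes a weighted gradient step, until the energy drops below tol or
// params.max_iterations is reached.
//
// A minimal usage sketch (the step size, tolerance and centroid below are
// hypothetical illustration values, not ones prescribed by this file):
//
//   Point3D zero = { 0.0, 0.0, 0.0 }, one = { 1.0, 1.0, 1.0 };
//   Affine_Parameter init;
//   init.rotation = zero; init.scaling = one; init.translation = zero;
//   dataType centroid[3] = { 0, 0, 0 };
//   Affine_Parameter fit = registration3D(fixedData, movingData, init,
//       (dataType)0.5, (dataType)0.001, imageHeight, imageLength, imageWidth,
//       centroid, params);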
Affine_Parameter registration3D(dataType ** fixedData, dataType ** movingData, Affine_Parameter initTransform, dataType step_size, dataType tol, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType centroid[3], Registration_Params params)
{
//==============================================================================
size_t k, i, dim2D = imageLength * imageWidth;
int iteration = 0;
dataType firstCpuTime, secondCpuTime, regStartCpuTime, regStopCpuTime, regTotalCpuTimen = 0.;
dataType energyTotalCpuTime = 0., distanceTotalCpuTime = 0., gradientTotalCpuTime = 0., transformationTotalCpuTime = 0.;
//==============================================================================
// Affine Parameters
Affine_Parameter affineResult, affineTmp;
//==============================================================================
// Create new shape pointers to be used
dataType ** destPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // distances for destination
//==============================================================================
// ClipBox Variable
dataType ** movInitPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // distances for Moving
//==============================================================================
ClipBox coordFixed, coordMoving, bestFit, coordMovingTmp;
//==============================================================================
const size_t mem_alloc_2D_block = sizeof(dataType) * dim2D;
//==============================================================================
const dataType large_value = 50000;
//==============================================================================
if (params.use_clipbox)
{
for (i = 0; i < imageHeight; i++)
{
movInitPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
}
}
//==============================================================================
// Initializations of Pointers
for (i = 0; i < imageHeight; i++)
{
destPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
}
//==============================================================================
// Instantiate Affine Parameters
affineResult.rotation = initTransform.rotation;
affineResult.scaling = initTransform.scaling;
affineResult.translation = initTransform.translation;
//==============================================================================
// Energy tmp optimal, stop boolean
dataType energyTmp;
bool stopCond = false;
//==============================================================================
// Apply distance function between transPtr and distTrans
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
fastSweepingFunction_3D(destPtr, fixedData, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
//==============================================================================
if (params.use_clipbox)
{
// Initial dist. fn for moving image before adding any transformation
fastSweepingFunction_3D(movInitPtr, movingData, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
distanceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
if (params.use_clipbox)
{
//==============================================================================
// ClipBoxes from the calculated distances
// Finding the clip box points for the fixed image
coordFixed = findClipBoxSingle(destPtr, imageHeight, imageLength, imageWidth);
//==============================================================================
coordMoving = findClipBoxSingle(movInitPtr, imageHeight, imageLength, imageWidth);
//==============================================================================
// Free after
for (k = 0; k < imageHeight; k++)
{
free(movInitPtr[k]);
}
free(movInitPtr);
movInitPtr = NULL;
//==============================================================================
}
//==============================================================================
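// NOTE: the CONSOLE_OUTPUT timing prints below read firstCpuTime and
// secondCpuTime, which are only assigned when MEASURE_TIME is also defined.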
#ifdef CONSOLE_OUTPUT
printf("Distance calc before Registration CPU time: %e secs\n\n", secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Begin Registration of Distances between shapes
// Start Timing the Registration Process
#ifdef MEASURE_TIME
regStartCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
// Create new shape pointers to be used
dataType ** transPtr = (dataType **)malloc(sizeof(dataType*) * imageHeight); // Transformed Ptr
dataType ** distTransPtr = (dataType **)malloc(sizeof(dataType*) * imageHeight); // distances for Transformed Ptr
for (i = 0; i < imageHeight; i++)
{
transPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
distTransPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
}
//==============================================================================
while (!stopCond)
{
//==============================================================================
// Timing The Transformation Inside the registration function
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
transform3DImage(movingData, transPtr, affineResult.translation, affineResult.scaling, affineResult.rotation, imageHeight, imageLength, imageWidth, params.imageBackground, centroid, params.parallelize);
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
transformationTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
if (params.use_clipbox)
{
//==============================================================================
// Transform the coordMoving clip box using calc. transform component results
// Copy to coordMovingTmp
coordMovingTmp = coordMoving;
transformClip(&coordMovingTmp, affineResult.translation, affineResult.scaling, affineResult.rotation, centroid, imageHeight, imageLength, imageWidth);
// Find the bestFit from transformed clip
bestFit.k_min = min(coordFixed.k_min, coordMovingTmp.k_min);
bestFit.i_min = min(coordFixed.i_min, coordMovingTmp.i_min);
bestFit.j_min = min(coordFixed.j_min, coordMovingTmp.j_min);
bestFit.k_max = max(coordFixed.k_max, coordMovingTmp.k_max);
bestFit.i_max = max(coordFixed.i_max, coordMovingTmp.i_max);
bestFit.j_max = max(coordFixed.j_max, coordMovingTmp.j_max);
//==============================================================================
}
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Registration Transformation calc. CPU time at iteration %4d: %e secs\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Apply distance function between transPtr and distTrans
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (params.use_clipbox)
{
fSweeping3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground, bestFit);
}
else
{
fastSweepingFunction_3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
distanceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Distance calc during Registration CPU time at iteration %4d: %e secs\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Evaluate Energy Function
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (params.use_clipbox)
{
// Evaluate Energy Function - L2 Norm Between the two calc. distances within the band and clipbox
energyTmp = energyFunctionClip(destPtr, distTransPtr, bestFit, imageLength);
}
else
{
// Evaluate Energy Function - L2 Norm Between the two calc. distances
energyTmp = energyFunction(destPtr, distTransPtr, imageHeight, imageLength, imageWidth, params.h);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
energyTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Energy Function calc. CPU time at iteration %4d: %e secs\n\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Print Pre-evaluate affine values
#ifdef CONSOLE_OUTPUT
printf("Energy = %5.5lf, iteration %4d, Phi = %3.5lf, Theta = %3.5lf, Psi = %3.5lf, Sx = %2.5lf, Sy = %2.5lf, Sz = %2.5lf, Tx = %2.5lf, Ty = %2.5lf, Tz = %2.5lf\n",
energyTmp, iteration, affineResult.rotation.x, affineResult.rotation.y, affineResult.rotation.z, affineResult.scaling.x, affineResult.scaling.y,
affineResult.scaling.z, affineResult.translation.x, affineResult.translation.y, affineResult.translation.z);
#endif
//==============================================================================
// Check stopping condition with tolerance and number of iterations
if (energyTmp < tol || iteration == params.max_iterations)
{
stopCond = true;
for (k = 0; k < imageHeight; k++)
{
free(destPtr[k]);
free(transPtr[k]);
free(distTransPtr[k]);
}
free(destPtr);
destPtr = NULL;
//
free(transPtr);
transPtr = NULL;
//
free(distTransPtr);
distTransPtr = NULL;
//==============================================================================
printf("Total distance Function calc. CPU Time is: %e secs\n", distanceTotalCpuTime);
printf("Total energy Function calc. CPU Time is: %e secs\n", energyTotalCpuTime);
printf("Total gradient Function calc. CPU Time is: %e secs\n", gradientTotalCpuTime);
printf("Total transformation Function calc. CPU Time is: %e secs\n", transformationTotalCpuTime);
//==============================================================================
// Print the Calculated Transformation Parameters At the End of Registration
printf("Energy = %5.5lf, iteration %4d, Phi = %3.5lf, Theta = %3.5lf, Psi = %3.5lf, Sx = %2.5lf, Sy = %2.5lf, Sz = %2.5lf, Tx = %2.5lf, Ty = %2.5lf, Tz = %2.5lf\n",
energyTmp, iteration, affineResult.rotation.x, affineResult.rotation.y, affineResult.rotation.z, affineResult.scaling.x, affineResult.scaling.y,
affineResult.scaling.z, affineResult.translation.x, affineResult.translation.y, affineResult.translation.z);
//==============================================================================
}
else
{
//==============================================================================
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (params.use_clipbox)
{
affineTmp = gradientComponentsClip(destPtr, distTransPtr, params.h, &affineResult, imageHeight, imageLength, imageWidth, bestFit);
}
else
{
affineTmp = gradientComponents(destPtr, distTransPtr, params.h, &affineResult, imageHeight, imageLength, imageWidth);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
gradientTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Gradient Components calc. CPU time at iteration %4d: %e secs\n\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Set new values for affine temp results
// Rotation
affineResult.rotation.x += params.rotation_weight * step_size*affineTmp.rotation.x;
affineResult.rotation.y += params.rotation_weight * step_size*affineTmp.rotation.y;
affineResult.rotation.z += params.rotation_weight * step_size*affineTmp.rotation.z;
// Scaling
affineResult.scaling.x += params.scaling_weight * step_size*affineTmp.scaling.x;
affineResult.scaling.y += params.scaling_weight * step_size*affineTmp.scaling.y;
affineResult.scaling.z += params.scaling_weight * step_size*affineTmp.scaling.z;
//Translation
affineResult.translation.x += params.translation_weight * step_size*affineTmp.translation.x;
affineResult.translation.y += params.translation_weight * step_size*affineTmp.translation.y;
affineResult.translation.z += params.translation_weight * step_size*affineTmp.translation.z;
//==============================================================================
// Increase Iteration
iteration++;
//==============================================================================
}
}
//==============================================================================
// Stop Timing the Registration Process
#ifdef MEASURE_TIME
regStopCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store Time For Each Registration run
regTotalCpuTimen = regStopCpuTime - regStartCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Total Registration Function calc. CPU Time is: %e secs\n\n", regTotalCpuTimen);
#endif
//==============================================================================
return affineResult;
//==============================================================================
}
//==============================================================================
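// registrationCoorDinateDescent3D registers the shapes by coordinate
// descent: each iteration updates one parameter block (rotation, scaling or
// translation, cycled through the components counter). A step is kept only
// if the energy decreases; otherwise the previously accepted parameters are
// restored. After a full cycle of rejections the step size is halved, and
// once it falls below the minimum it is reset to the initial step_size;
// max_resets such resets, the tolerance, or max_ter iterations stop the loop.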
Affine_Parameter registrationCoorDinateDescent3D(dataType ** fixedData, dataType ** movingData, Affine_Parameter initTransform, dataType step_size, dataType tol, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType centroid[3], Registration_Params params)
{
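// Rotation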
dataType rotation_weight = 1.0;
// Scaling
dataType scaling_weight = 1.0;
// Translation
dataType translation_weight = 1.0;
//==============================================================================
int components = 1, switchcomponent;
//==============================================================================
dataType stepsize = step_size;
//==============================================================================
size_t k, i, dim2D = imageLength * imageWidth;
int iteration = 0, max_ter = 1000;
int count_rejected = 0, state_accept, count_steps_reset = 0, max_resets = 9;
//==============================================================================
dataType firstCpuTime, secondCpuTime, regStartCpuTime, regStopCpuTime, regTotalCpuTimen = 0.;
dataType energyTotalCpuTime = 0., distanceTotalCpuTime = 0., gradientTotalCpuTime = 0., transformationTotalCpuTime = 0.;
// Affine tmp prev init
Point3D rotationTran = { 0.0, 0.0, 0.0 };
Point3D scalingTran = { 1.0, 1.0, 1.0 };
Point3D translationTran = { 0.0, 0.0, 0.0 };
Affine_Parameter affineResult, affineTmp, affineTmp_prev, affinePrev;
affineTmp_prev.rotation = rotationTran, affineTmp_prev.scaling = scalingTran, affineTmp_prev.translation = translationTran;
// Create new shape pointers to be used
dataType ** destPtr = (dataType **)malloc(sizeof(dataType*) * imageHeight); // distances for destination
//==============================================================================
const size_t mem_alloc_2D_block = sizeof(dataType)*dim2D;
//==============================================================================
const dataType large_value = 50000;
//==============================================================================
// USE_CLIP
ClipBox coordFixed, coordMoving, bestFit, coordMovingTmp; // Clipbox for bestFit of both fixed and moving images, Moving image clipbox
dataType ** movInitPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // distances for Moving
if (params.use_clipbox)
{
for (i = 0; i < imageHeight; i++)
{
movInitPtr[i] = (dataType*)malloc(mem_alloc_2D_block);
}
}
//==============================================================================
// Initializations of Pointers
for (i = 0; i < imageHeight; i++)
{
destPtr[i] = (dataType*)malloc(mem_alloc_2D_block);
}
// Initialize to same background - default value is 255, 0, 0
//initialize3dArrayD(destPtr, imageLength, imageWidth, imageHeight, foregound);
//==============================================================================
// Instantiate Affine Parameters
affineResult.rotation = initTransform.rotation;
affineResult.scaling = initTransform.scaling;
affineResult.translation = initTransform.translation;
//==============================================================================
Point3D init_trans = { 0,0,0 };
Point3D init_rot = { 0,0,0 };
Point3D init_scale = { 1,1,1 };
affinePrev = affineResult; // Start the same as the affine results
//==============================================================================
// Energy tmp optimal, stop boolean
double energyTmp, prev_energy = DBL_MAX;
bool stopCond = false;
// Apply distance function between transPtr and distTrans
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
fastSweepingFunction_3D(destPtr, fixedData, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
//==============================================================================
// USE_CLIP
if (params.use_clipbox)
{
// Initial dist. fn for moving image before adding any transformation
fastSweepingFunction_3D(movInitPtr, movingData, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
distanceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
// USE_CLIP
if (params.use_clipbox)
{
//==============================================================================
// Finding the clip box points for the fixed image
coordFixed = findClipBoxSingle(destPtr, imageHeight, imageLength, imageWidth);
//==============================================================================
coordMoving = findClipBoxSingle(movInitPtr, imageHeight, imageLength, imageWidth);
//==============================================================================
// Free after
for (k = 0; k < imageHeight; k++)
{
free(movInitPtr[k]);
}
free(movInitPtr);
movInitPtr = NULL;
//==============================================================================
}
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Distance calc before Registration CPU time: %e secs\n\n", secondCpuTime - firstCpuTime);
#endif
// Begin Registration of Distances between shapes
// Start Timing the Registration Process
#ifdef MEASURE_TIME
regStartCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
// Create new shape pointers to be used
dataType ** transPtr = (dataType**)malloc(sizeof(dataType*) * imageHeight); // Transformed Ptr
dataType ** distTransPtr = (dataType**)malloc(sizeof(dataType*) * imageHeight); // distances for Transformed Ptr
for (i = 0; i < imageHeight; i++)
{
transPtr[i] = (dataType*)malloc(mem_alloc_2D_block);
distTransPtr[i] = (dataType*)malloc(mem_alloc_2D_block);
}
//==============================================================================
while (!stopCond)
{
// Timing The Transformation Inside the registration function
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
transform3DImage(movingData, transPtr, affineResult.translation, affineResult.scaling, affineResult.rotation, imageHeight, imageLength, imageWidth, params.imageBackground, centroid, params.parallelize);
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
transformationTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
// USE_CLIP
if (params.use_clipbox)
{
//==============================================================================
// Transform the coordMoving clip box using calc. transform component results
// Copy to coordMovingTmp
coordMovingTmp = coordMoving;
transformClip(&coordMovingTmp, affineResult.translation, affineResult.scaling, affineResult.rotation, centroid, imageHeight, imageLength, imageWidth);
// Find the bestFit from transformed clip
bestFit.k_min = min(coordFixed.k_min, coordMovingTmp.k_min);
bestFit.i_min = min(coordFixed.i_min, coordMovingTmp.i_min);
bestFit.j_min = min(coordFixed.j_min, coordMovingTmp.j_min);
bestFit.k_max = max(coordFixed.k_max, coordMovingTmp.k_max);
bestFit.i_max = max(coordFixed.i_max, coordMovingTmp.i_max);
bestFit.j_max = max(coordFixed.j_max, coordMovingTmp.j_max);
//==============================================================================
}
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Registration Transformation calc. CPU time at iteration %4d: %e secs\n", iteration, secondCpuTime - firstCpuTime);
#endif
// Apply distance function between transPtr and distTrans
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
// USE_CLIP
if (params.use_clipbox)
{
// fSweeping3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, 1, large_value, foregound, bestFit);
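// NOTE: the clip-box sweep above is commented out, so both branches of this
// if/else currently run the full-domain fast sweeping.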
fastSweepingFunction_3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
}
else
{
fastSweepingFunction_3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
distanceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
#ifdef CONSOLE_OUTPUT
printf("Distance calc during Registration CPU time at iteration %4d: %e secs\n", iteration, secondCpuTime - firstCpuTime);
#endif
// Evaluate Energy Function - L2 Norm Between the two calc. distances
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
// Evaluate Energy Function - L2 Norm Between the two calc. distances within the band and clipbox
// USE_CLIP
if (params.use_clipbox)
{
energyTmp = energyFunctionClip(destPtr, distTransPtr, bestFit, imageLength);
//energyTmp = energyFunction(destPtr, distTransPtr, imageHeight, imageLength, imageWidth, h);
}
else {
energyTmp = energyFunction(destPtr, distTransPtr, imageHeight, imageLength, imageWidth, params.h);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
energyTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
dataType diff_abs = (dataType)fabs(energyTmp - prev_energy), accept_diff = (dataType) 1e-04;
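// NOTE: diff_abs and accept_diff are computed but the acceptance test below
// compares the raw energies only.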
// Check if current energy has reduced from previous
if (energyTmp <= prev_energy)
{
prev_energy = energyTmp;
//==============================================================================
state_accept = 0;
//==============================================================================
affinePrev = affineResult;
//==============================================================================
}
else // Move to a new component when the current one gives a worse energy
{
state_accept = 1;
//==============================================================================
// Restore the previously accepted parameters; the distance pointer is recalculated from them.
affineResult = affinePrev;
//==============================================================================
}
//==============================================================================
// Halve the step size after a full cycle of rejected component updates
if (count_rejected >= 3) // Maximum rejections
{
stepsize = (dataType)(stepsize / 2.0);
//==============================================================================
count_rejected = 0; // Reset
//==============================================================================
}
//==============================================================================
// Check whether the step size has fallen below the acceptable minimum
if (stepsize < 0.004)
{
stepsize = step_size; // Reset to the original value to start all over again
count_steps_reset++;
}
//==============================================================================
if (state_accept == 1)
{
count_rejected++; // Increment
//==============================================================================
}
else if (state_accept == 0)
{
count_rejected = 0; // Reset
count_steps_reset = 0;
//==============================================================================
}
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Energy Function calc. CPU time at iteration %4d: %e secs\n\n", iteration, secondCpuTime - firstCpuTime);
#endif
// Print Pre-evaluate affine values
#ifdef CONSOLE_OUTPUT
printf("Energy = %5.5lf, iteration %4d, Phi = %3.5lf, Theta = %3.5lf, Psi = %3.5lf, Sx = %2.5lf, Sy = %2.5lf, Sz = %2.5lf, Tx = %2.5lf, Ty = %2.5lf, Tz = %2.5lf\n",
energyTmp, iteration, affineResult.rotation.x, affineResult.rotation.y, affineResult.rotation.z, affineResult.scaling.x, affineResult.scaling.y,
affineResult.scaling.z, affineResult.translation.x, affineResult.translation.y, affineResult.translation.z);
#endif
//==============================================================================
// Check stopping condition with tolerance and number of iterations
if (energyTmp < tol || iteration == max_ter || count_steps_reset == max_resets)
{
//==============================================================================
affineResult = affinePrev;
stopCond = true;
//==============================================================================
printf("Total distance Function calc. CPU Time is: %e secs\n", distanceTotalCpuTime);
printf("Total energy Function calc. CPU Time is: %e secs\n", energyTotalCpuTime);
printf("Total Coordinate Descent Function calc. CPU Time is: %e secs\n", gradientTotalCpuTime);
printf("Total transformation Function calc. CPU Time is: %e secs\n", transformationTotalCpuTime);
//==============================================================================
// Print the Calculated Transformation Parameters At the End of Registration
printf("Energy = %8.8lf, iteration %4d, Phi = %3.8lf, Theta = %3.8lf, Psi = %3.8lf, Sx = %2.8lf, Sy = %2.8lf, Sz = %2.8lf, Tx = %2.8lf, Ty = %2.8lf, Tz = %2.8lf\n",
prev_energy, iteration, affineResult.rotation.x, affineResult.rotation.y, affineResult.rotation.z, affineResult.scaling.x, affineResult.scaling.y,
affineResult.scaling.z, affineResult.translation.x, affineResult.translation.y, affineResult.translation.z);
//==============================================================================
for (k = 0; k < imageHeight; k++)
{
free(destPtr[k]);
free(transPtr[k]);
free(distTransPtr[k]);
}
free(destPtr);
destPtr = NULL;
//
free(transPtr);
transPtr = NULL;
//
free(distTransPtr);
distTransPtr = NULL;
//==============================================================================
}
else
{
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (components > 3)
{
components = 1; // Reset to the first component to start all over again
}
//==============================================================================
switchcomponent = components;
//==============================================================================
// USE_CLIP
if (params.use_clipbox)
{
// affineTmp = gradCoorDescentCompClip(destPtr, distTransPtr, 1.0, &affineResult, imageHeight, imageLength, imageWidth, switchcomponent, bestFit);
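// NOTE: the clip-box variant above is commented out, so both branches of
// this if/else call the same full-domain coordinate descent gradient.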
affineTmp = gradientCoorDinateDescentComp(destPtr, distTransPtr, 1.0, &affineResult, imageHeight, imageLength, imageWidth, switchcomponent);
}
else
{
affineTmp = gradientCoorDinateDescentComp(destPtr, distTransPtr, 1.0, &affineResult, imageHeight, imageLength, imageWidth, switchcomponent);
}
//==============================================================================
switch (switchcomponent)
{
case 1:
// Set new values for affine temp results
// Rotation
affineResult.rotation.x += rotation_weight * stepsize*affineTmp.rotation.x;
affineResult.rotation.y += rotation_weight * stepsize*affineTmp.rotation.y;
affineResult.rotation.z += rotation_weight * stepsize*affineTmp.rotation.z;
//==============================================================================
components++;
break;
case 2:
// Scaling
affineResult.scaling.x += scaling_weight * stepsize*affineTmp.scaling.x;
affineResult.scaling.y += scaling_weight * stepsize*affineTmp.scaling.y;
affineResult.scaling.z += scaling_weight * stepsize*affineTmp.scaling.z;
//==============================================================================
components++;
break;
case 3:
//Translation
affineResult.translation.x += translation_weight * stepsize*affineTmp.translation.x;
affineResult.translation.y += translation_weight * stepsize*affineTmp.translation.y;
affineResult.translation.z += translation_weight * stepsize*affineTmp.translation.z;
//==============================================================================
components++;
break;
default:
break;
}
//==============================================================================
affineTmp_prev = affineTmp;
//==============================================================================
// Increase Iteration
iteration++;
}
}
//==============================================================================
#ifdef MEASURE_TIME
regStopCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store Time For Each Registration run
regTotalCpuTimen = regStopCpuTime - regStartCpuTime;
#endif
#ifdef CONSOLE_OUTPUT
printf("Total Registration Function calc. CPU Time is: %e secs\n\n", regTotalCpuTimen);
#endif
//==============================================================================
return affineResult;
//==============================================================================
}
//==============================================================================
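// registrationStochastic3D follows the same transform / distance / energy
// loop as the other registration drivers, but is set up for stochastic
// gradient estimation: it allocates buffers for params.rand_points randomly
// generated sample points together with their calculated distances,
// neighbour distances and finite differences, and can optionally
// (params.binary_nband) build narrow band areas that restrict the energy
// evaluation.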
Affine_Parameter registrationStochastic3D(dataType ** fixedData, dataType ** movingData, Affine_Parameter initTransform, dataType step_size, dataType tol, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType centroid[3], Registration_Params params)
{
//==============================================================================
// Randomly generated points will be stored here
Random3DPoints * generated_points = malloc(sizeof(Random3DPoints) * params.rand_points);
// The distances calculated from the random generated points
distanceCalculated * distances_calculated = malloc(sizeof(distanceCalculated) * params.rand_points);
// The neighbour point distance values will be stored in these structs for each of the 3 directions
xNbDistances * xfwd_dist = malloc(sizeof(xNbDistances) * params.rand_points);
yNbDistances * yfwd_dist = malloc(sizeof(yNbDistances) * params.rand_points);
zNbDistances * zfwd_dist = malloc(sizeof(zNbDistances) * params.rand_points);
// The finite differences calculated from the neighbour point differences will be stored in this struct.
Finite_Differences * fwd_vals = malloc(sizeof(Finite_Differences) * params.rand_points);
//==============================================================================
size_t k, i, j, l, x, dim2D = imageLength * imageWidth, maxSurfacePts = (size_t)(0.05 * dim2D * imageHeight);
int iteration = 0;
const dataType h = 1.0;
dataType firstCpuTime, secondCpuTime, regStartCpuTime, regStopCpuTime, regTotalCpuTimen = 0.;
dataType energyTotalCpuTime = 0., distanceTotalCpuTime = 0., gradientTotalCpuTime = 0., transformationTotalCpuTime = 0., conversionTotalCpuTime = 0., surfacePtsTotalCpuTime = 0., edgeDetectionTotalCpuTime = 0., generateRandomPtsTotalCpuTime = 0., distanceCalculateTotalCpuTime = 0., finiteDiffereceTotalCpuTime = 0.;
//==============================================================================
const size_t mem_alloc_2D_block = sizeof(dataType)*dim2D;
//==============================================================================
const dataType large_value = 50000;
//==============================================================================
Point3D * surface_points = malloc(sizeof(Point3D) * maxSurfacePts);
//==============================================================================
// Affine Parameters
Affine_Parameter affineResult;
//==============================================================================
// Create new fixed dist. pointers to be used
dataType ** destPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // distances for destination
//==============================================================================
dataType ** movInitPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // distances for Moving
// Calc. the narrow band areas for fixed and moving dist. fn's
dataType ** fixedNBandPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // narrow band area for fixed dist. fn
dataType ** movingNBandPtr = (dataType **)malloc(sizeof(dataType *) * imageHeight); // narrow band area for moving dist. fn
dataType centroidMovingBandArea[3];
ClipBox coordFixed, coordMoving, bestFit, coordMovingTmp;
if (params.use_clipbox)
{
for (i = 0; i < imageHeight; i++)
{
movInitPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
}
}
//==============================================================================
// Initializations of Pointers
for (i = 0; i < imageHeight; i++)
{
destPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
}
//==============================================================================
// Instantiate Affine Parameters
affineResult.rotation = initTransform.rotation;
affineResult.scaling = initTransform.scaling;
affineResult.translation = initTransform.translation;
//==============================================================================
// Energy tmp optimal, stop boolean
dataType energyTmp;
bool stopCond = false;
//==============================================================================
// Edge detection fn for the moving binary transformed image
dataType ** edgeMovingPointer = (dataType **)malloc(sizeof(dataType*) * imageHeight); // distances for Transformed Ptr
//==============================================================================
if (!params.use_FSM)
{
for (i = 0; i < imageHeight; i++)
{
edgeMovingPointer[i] = malloc(mem_alloc_2D_block);
}
}
//==============================================================================
// Apply distance function between transPtr and distTrans
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
fastSweepingFunction_3D(destPtr, fixedData, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
//==============================================================================
if (params.use_clipbox)
{
//==============================================================================
fastSweepingFunction_3D(movInitPtr, movingData, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
//==============================================================================
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
distanceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
if (params.use_clipbox)
{
//==============================================================================
// Finding the clip box points for the fixed image's unsigned dist. fn
coordFixed = findClipBoxSingle(destPtr, imageHeight, imageLength, imageWidth);
//==============================================================================
// From unsigned dist. fn
coordMoving = findClipBoxSingle(movInitPtr, imageHeight, imageLength, imageWidth);
//==============================================================================
if (params.binary_nband)
{
for (i = 0; i < imageHeight; i++)
{
fixedNBandPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
movingNBandPtr[i] = (dataType *)malloc(mem_alloc_2D_block);
}
// Fill the narrow band areas for fixed, moving respectively
fillNarrowBandArea(destPtr, fixedNBandPtr, imageHeight, imageLength, imageWidth, params.imageForeground, params.imageBackground);
fillNarrowBandArea(movInitPtr, movingNBandPtr, imageHeight, imageLength, imageWidth, params.imageForeground, params.imageBackground);
// Centroid for moving narrow band area
centroidImage(movingNBandPtr, centroidMovingBandArea, imageHeight, imageLength, imageWidth, params.imageBackground);
}
//==============================================================================
// Free after
for (k = 0; k < imageHeight; k++)
{
free(movInitPtr[k]);
}
free(movInitPtr);
movInitPtr = NULL;
//==============================================================================
}
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Distance calc before Registration CPU time: %e secs\n\n", secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Begin Registration of Distances between shapes
// Start Timing the Registration Process
#ifdef MEASURE_TIME
regStartCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
// Create new shape pointers to be used
dataType ** transPtr = (dataType **)malloc(sizeof(dataType*) * imageHeight); // Transformed Ptr
dataType ** transMovingPtr = (dataType **)malloc(sizeof(dataType*) * imageHeight); // Transformed Ptr
dataType ** distTransPtr = (dataType **)malloc(sizeof(dataType*) * imageHeight); // distances for Transformed Ptr
//==============================================================================
for (i = 0; i < imageHeight; i++)
{
transPtr[i] = malloc(mem_alloc_2D_block);
transMovingPtr[i] = malloc(mem_alloc_2D_block);
distTransPtr[i] = malloc(mem_alloc_2D_block);
}
//==============================================================================
while (!stopCond)
{
//==============================================================================
// Timing The Transformation Inside the registration function
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
transform3DImage(movingData, transPtr, affineResult.translation, affineResult.scaling, affineResult.rotation, imageHeight, imageLength, imageWidth, params.imageBackground, centroid, params.parallelize);
if (params.binary_nband)
{
// Transform the moving narrow band area
transform3DImage(movingNBandPtr, transMovingPtr, affineResult.translation, affineResult.scaling, affineResult.rotation, imageHeight, imageLength, imageWidth, params.imageBackground, centroidMovingBandArea, params.parallelize);
// Swap pointers so movingNBandPtr holds the transformed band area
dataType ** tmpPtr = NULL;
tmpPtr = movingNBandPtr;
movingNBandPtr = transMovingPtr;
transMovingPtr = tmpPtr;
}
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
transformationTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
if (params.use_clipbox)
{
//==============================================================================
// Transform the coordMoving clip box using calc. transform component results
// Copy to coordMovingTmp
coordMovingTmp = coordMoving;
transformClip(&coordMovingTmp, affineResult.translation, affineResult.scaling, affineResult.rotation, centroid, imageHeight, imageLength, imageWidth);
// Find the bestFit from transformed clip
bestFit.k_min = min(coordFixed.k_min, coordMovingTmp.k_min);
bestFit.i_min = min(coordFixed.i_min, coordMovingTmp.i_min);
bestFit.j_min = min(coordFixed.j_min, coordMovingTmp.j_min);
bestFit.k_max = max(coordFixed.k_max, coordMovingTmp.k_max);
bestFit.i_max = max(coordFixed.i_max, coordMovingTmp.i_max);
bestFit.j_max = max(coordFixed.j_max, coordMovingTmp.j_max);
//==============================================================================
}
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Registration Transformation calc. CPU time at iteration %4d: %e secs\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Apply distance function between transPtr and distTrans
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (params.use_clipbox)
{
fSweeping3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground, bestFit);
}
else
{
fastSweepingFunction_3D(distTransPtr, transPtr, imageLength, imageWidth, imageHeight, params.h, large_value, params.imageForeground);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
distanceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Distance calc during Registration CPU time at iteration %4d: %e secs\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Evaluate Energy Function - L2 Norm Between the two calc. distances
// Begin Record Time
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (params.use_clipbox)
{
if (params.binary_nband)
{
// For using narrowband areas
energyTmp = energyFunctionClipBandArea(destPtr, distTransPtr, bestFit, imageLength, fixedNBandPtr, movingNBandPtr, params.imageForeground);
}
else
{
energyTmp = energyFunctionClip(destPtr, distTransPtr, bestFit, imageLength);
}
}
else
{
energyTmp = energyFunction(destPtr, distTransPtr, imageHeight, imageLength, imageWidth, params.h);
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
energyTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Energy Function calc. CPU time at iteration %4d: %e secs\n\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Print the affine values before the stopping check
if (params.displayRegistrationOutputs)
{
printf("Energy = %5.5lf, iteration %4d, Phi = %3.5lf, Theta = %3.5lf, Psi = %3.5lf, Sx = %2.5lf, Sy = %2.5lf, Sz = %2.5lf, Tx = %2.5lf, Ty = %2.5lf, Tz = %2.5lf\n",
energyTmp, iteration, affineResult.rotation.x, affineResult.rotation.y, affineResult.rotation.z, affineResult.scaling.x, affineResult.scaling.y,
affineResult.scaling.z, affineResult.translation.x, affineResult.translation.y, affineResult.translation.z);
}
//==============================================================================
// Check the stopping condition against the tolerance and the maximum number of iterations
if (energyTmp < tol || iteration == params.max_iterations)
{
//==============================================================================
stopCond = true;
//==============================================================================
free(surface_points);
//==============================================================================
// Free up the destination ptr before exiting
for (k = 0; k < imageHeight; k++)
{
free(destPtr[k]);
if (params.binary_nband)
{
// Free the band areas
free(fixedNBandPtr[k]);
free(movingNBandPtr[k]);
}
if (!params.use_FSM)
{
// Free the transformed moving edge
free(edgeMovingPointer[k]);
}
//
free(transMovingPtr[k]);
//
free(distTransPtr[k]);
//
free(transPtr[k]); // Free the transformed moving image
}
free(destPtr);
destPtr = NULL;
if (params.binary_nband)
{
// Band areas
free(fixedNBandPtr);
free(movingNBandPtr);
fixedNBandPtr = NULL;
movingNBandPtr = NULL;
}
if (!params.use_FSM)
{
// Free the transformed moving edge
free(edgeMovingPointer);
edgeMovingPointer = NULL;
}
//
free(transMovingPtr);
transMovingPtr = NULL;
//
free(distTransPtr);
distTransPtr = NULL;
//
free(transPtr);
transPtr = NULL;
//==============================================================================
printf("Total distance Function calc. CPU Time is: %e secs\n", distanceTotalCpuTime);
printf("Total energy Function calc. CPU Time is: %e secs\n", energyTotalCpuTime);
printf("Total SGD Function calc. CPU Time is: %e secs\n", gradientTotalCpuTime);
printf("Total transformation Function calc. CPU Time is: %e secs\n", transformationTotalCpuTime);
//==============================================================================
// Print the Calculated Transformation Parameters At the End of Registration
printf("Energy = %5.5lf, iteration %4d, Phi = %3.5lf, Theta = %3.5lf, Psi = %3.5lf, Sx = %2.5lf, Sy = %2.5lf, Sz = %2.5lf, Tx = %2.5lf, Ty = %2.5lf, Tz = %2.5lf\n",
energyTmp, iteration, affineResult.rotation.x, affineResult.rotation.y, affineResult.rotation.z, affineResult.scaling.x, affineResult.scaling.y,
affineResult.scaling.z, affineResult.translation.x, affineResult.translation.y, affineResult.translation.z);
//==============================================================================
}
else
{
//==============================================================================
// Call the SGD method
//==============================================================================
// Set up the other parameters
//==============================================================================
// Forward difference parameters
dataType xFwd, yFwd, zFwd;
// Derivative component
dataType componentX, componentY, componentZ;
// Stores the difference between two distance pointers
dataType distDifference;
// Shorter Transformation names
dataType phi = affineResult.rotation.x, theta = affineResult.rotation.y, psi = affineResult.rotation.z;
dataType sx = affineResult.scaling.x, sy = affineResult.scaling.y, sz = affineResult.scaling.z;
dataType tx = affineResult.translation.x, ty = affineResult.translation.y, tz = affineResult.translation.z;
//==============================================================================
// Angles are in radians; precompute their sines and cosines (and negatives)
dataType _cos_phi = (dataType)cos(phi), _cos_psi = (dataType)cos(psi), _cos_theta = (dataType)cos(theta);
dataType _cos_phi_neg = (dataType)(-1 * cos(phi)), _cos_psi_neg = (dataType)(-1 * cos(psi)), _cos_theta_neg = (dataType)(-1 * cos(theta));
dataType _sin_phi = (dataType)sin(phi), _sin_psi = (dataType)sin(psi), _sin_theta = (dataType)sin(theta);
dataType _sin_phi_neg = (dataType)(-1 * sin(phi)), _sin_psi_neg = (dataType)(-1 * sin(psi)), _sin_theta_neg = (dataType)(-1 * sin(theta));
//==============================================================================
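// The sine/cosine products below are the entries of the rotation matrix and of its
// partial derivatives with respect to phi, theta and psi; precomputing them once per
// iteration avoids re-evaluating the trigonometric functions for every sample point.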
dataType _cos_phi_psi = _cos_phi * _cos_psi; // cos(phi)*cos(psi)
dataType _cos_phi_theta = _cos_phi * _cos_theta; // cos(phi)*cos(theta)
dataType _cos_psi_theta = _cos_psi * _cos_theta; // cos(psi)*cos(theta)
dataType _cos_phi_theta_psi = _cos_phi_psi * _cos_theta;
dataType _sin_psi_theta = _sin_psi * _sin_theta;// sin(psi)*sin(theta)
dataType _sin_phi_theta = _sin_phi * _sin_theta;// sin(phi)*sin(theta)
dataType _sin_phi_psi = _sin_phi * _sin_psi;// sin(phi)*sin(psi)
dataType _sin_phi_psi_theta = _sin_phi_psi * _sin_theta; // sin(phi)*sin(psi)*sin(theta)
//==============================================================================
dataType _sin_phi_neg_sin_psi = _sin_phi_neg * _sin_psi; // (-sin(phi)*sin(psi)
dataType _cos_phi_psi_sin_theta = _cos_phi_psi * _sin_theta; // cos(phi)*cos(psi)*sin(theta)
dataType _cos_psi_neg_sin_phi = _cos_psi_neg * _sin_phi; // (-cos(psi)*sin(phi)
dataType _cos_phi_neg_sin_psi_theta = _cos_phi_neg * _sin_psi_theta;
dataType _cos_phi_sin_psi_theta = _cos_phi * _sin_psi_theta; // cos(phi)*sin(psi)*sin(theta)
dataType _cos_phi_sin_psi = _cos_phi * _sin_psi;// cos(phi)*sin(psi)
dataType _cos_psi_sin_phi_theta = _cos_psi * _sin_phi_theta;// cos(psi)*sin(phi)*sin(theta)
dataType _cos_theta_sin_phi = _cos_theta * _sin_phi; // cos(theta) * sin(phi)
//==============================================================================
// 2nd component Ry
dataType _cos_psi_sin_theta = _cos_psi * _sin_theta;// cos(psi)*sin(theta)
dataType _cos_psi_theta_sin_phi = _cos_psi_theta * _sin_phi; // cos(psi)*cos(theta)*sin(phi)
dataType _cos_theta_sin_phi_psi = _cos_theta * _sin_phi_psi; // cos(theta)*sin(phi)*sin(psi)
dataType _cos_phi_theta_sin_psi = _cos_phi_theta * _sin_psi;// cos(phi)*cos(theta)*sin(psi)
dataType _cos_phi_sin_theta = _cos_phi * _sin_theta; // cos(phi)*sin(theta)
//==============================================================================
// 3rd component Rz
dataType _cos_theta_sin_psi = _cos_theta * _sin_psi; // cos(theta) * sin(psi)
dataType _cos_phi_neg_sin_psi = _cos_phi_neg * _sin_psi;// -cos(phi)*sin(psi)
//==============================================================================
// Set the scales - Directional
dataType _cos_psi__sin_phi_theta = _cos_psi * _sin_phi_theta;
//==============================================================================
dataType _cos_psi_sin_phi = _cos_psi * _sin_phi;// cos(psi)*sin(phi)
//==============================================================================
dataType pVal, qVal; // Used in Finite differences
dataType hh;
//==============================================================================
// Force uniform scaling and rotation when UNIFORM is defined
#ifdef UNIFORM
sx = sy = sz;
phi = theta = psi;
#endif // UNIFORM
//==============================================================================
// Scale denominator
dataType inv_sx2 = (dataType)(1.0 / (sx*sx));
dataType inv_sy2 = (dataType)(1.0 / (sy*sy));
dataType inv_sz2 = (dataType)(1.0 / (sz*sz));
//==============================================================================
// Randomly generated x,y,z points
//==============================================================================
// Seed the random number generator (left disabled for reproducible runs)
//srand(time(NULL));
//==============================================================================
bool loop = true;
//==============================================================================
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
if (!params.use_FSM)
{
edgeDetection3dFunctionD(transPtr, edgeMovingPointer, imageLength, imageWidth, imageHeight, params.imageBackground, params.imageForeground, params.insideShapevalue);
}
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time recorded
edgeDetectionTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
dataType getDist;
//==============================================================================
size_t k_2, i_2; // k_2 runs over the z axis, i_2 over the flattened x-y plane
//==============================================================================
size_t ptsNum = 0;
//==============================================================================
size_t k_min, k_max, i_min, i_max, j_min, j_max;
size_t i_2_max, i_2_min;
//==============================================================================
// Begin record time
// Calc. the points and store them
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
if (!params.use_FSM)
{
//==============================================================================
k_min = bestFit.k_min, k_max = bestFit.k_max + 1, i_min = bestFit.i_min, i_max = bestFit.i_max, j_min = bestFit.j_min, j_max = bestFit.j_max;
i_2_max = x_new(i_max, j_max, imageLength), i_2_min = x_new(i_min, j_min, imageLength);
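// Note: i_2 runs linearly over every flattened x-y index between the two corner
// offsets, i.e. over whole rows of the slice, so edge voxels outside the clip
// rectangle in those rows are collected as well (a conservative over-approximation).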
//==============================================================================
Point3D * tmpPt;
//==============================================================================
// Find the surface points only within the clip box
for (k_2 = k_min; k_2 < k_max; k_2++) // z axis of the input surface or image
{
for (i_2 = i_2_min; i_2 < i_2_max; i_2++)// x-y axis of the input surface or image
{
//==============================================================================
if (edgeMovingPointer[k_2][i_2] == params.imageForeground)
{
//==============================================================================
tmpPt = &surface_points[ptsNum];
//==============================================================================
tmpPt->x = (dataType)(i_2 / imageLength);
tmpPt->y = (dataType)(i_2 % imageLength);
tmpPt->z = (dataType)k_2;
ptsNum++;
}
}
}
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time recorded
surfacePtsTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
// Pass this surface_points to get distance fn
//==============================================================================
// Generate and store the random points
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
if (!params.use_FSM)
{
l = 0;
Random3DPoints * tmpRdPts;
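// Rejection sampling: draw uniformly distributed voxels (from the clip box or the
// whole domain) and keep only those inside the narrow band until params.rand_points
// samples are collected; a non-empty overlap band is assumed, otherwise this loop
// would not terminate.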
do
{
// Generate random points
if (params.use_clipbox)
{
// From clipbox
k = bestFit.k_min + (rand() % (bestFit.k_max - bestFit.k_min + 1)), i = bestFit.i_min + (rand() % (bestFit.i_max - bestFit.i_min + 1)), j = bestFit.j_min + (rand() % (bestFit.j_max - bestFit.j_min + 1)); // Sample inside [min, max] of the clip box
}
else
{
// From whole domain
k = rand() % imageHeight, i = rand() % imageLength, j = rand() % imageWidth;
}
// Check if inside the narrow band
x = x_new(i, j, imageLength);
if (params.binary_nband)
{
if (NFunctionBinary(fixedNBandPtr[k][x], movingNBandPtr[k][x], params.imageForeground) == 1) // Checks if inside the narrow band areas for randomly generated points
{
// Start loop
//==============================================================================
tmpRdPts = &generated_points[l];
tmpRdPts->k = k;
tmpRdPts->i = i;
tmpRdPts->j = j;
//==============================================================================
l = l + 1;
//==============================================================================
// Check condition
if (l == params.rand_points)
{
loop = false;
}
}
}
else
{
if (NFunction(destPtr[k][x], distTransPtr[k][x], params.imageForeground) == 1) // Checks if inside the narrow band areas for randomly generated points
{
// Start loop
//==============================================================================
tmpRdPts = &generated_points[l];
tmpRdPts->k = k;
tmpRdPts->i = i;
tmpRdPts->j = j;
//==============================================================================
l = l + 1;
//==============================================================================
// Check condition
if (l == params.rand_points)
{
loop = false;
}
}
}
} while ((loop) && (l <= params.rand_points));
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time recorded
generateRandomPtsTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
// Calculate the distances from generated points
distanceCalculated * tmpDistances;
xNbDistances * tmpXFwd;
yNbDistances * tmpYFwd;
zNbDistances * tmpZFwd;
dataType destFixed;
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
//==============================================================================
if (!params.use_FSM)
{
if (params.parallelize)
{
//==============================================================================
// Parallelize
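// Each random sample is processed independently (its distance and one-sided
// neighbour values), so the loop parallelizes without synchronization; the private
// clause keeps the per-sample scratch variables thread-local.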
omp_set_dynamic(0); // Disable dynamic adjustment of threads
//omp_set_num_threads(omp_get_num_procs()); // Request as many threads as there are processors
omp_set_num_threads(NUM_THREADS); // Request a fixed number of threads
#pragma omp parallel
{
#pragma omp for private(k , i , j, x, tmpDistances, tmpXFwd, tmpYFwd, tmpZFwd, getDist, destFixed) firstprivate(hh, pVal, qVal) schedule(static) nowait
for (l = 0; l < params.rand_points; l++)
{
//==============================================================================
k = generated_points[l].k;
i = generated_points[l].i;
j = generated_points[l].j;
//==============================================================================
// 2D to 1D representation for i, j
x = x_new(i, j, imageLength);
//==============================================================================
tmpDistances = &distances_calculated[l];
//==============================================================================
tmpXFwd = &xfwd_dist[l];
tmpYFwd = &yfwd_dist[l];
tmpZFwd = &zfwd_dist[l];
//==============================================================================
getDist = getDistance(edgeMovingPointer, imageHeight, imageLength, dim2D, k, x, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
destFixed = destPtr[k][x];
tmpDistances->distDifference = (dataType)((destFixed - getDist) * 2.0);
//==============================================================================
nbPointsX(edgeMovingPointer, h, x, k, i, imageHeight, imageLength, imageWidth, &pVal, &qVal, &hh, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
tmpXFwd->hvl = hh;
tmpXFwd->pvl = pVal;
tmpXFwd->qvl = qVal;
//==============================================================================
nbPointsY(edgeMovingPointer, h, k, i, j, imageHeight, imageLength, imageWidth, &pVal, &qVal, &hh, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
tmpYFwd->hvl = hh;
tmpYFwd->pvl = pVal;
tmpYFwd->qvl = qVal;
//==============================================================================
nbPointsZ(edgeMovingPointer, h, x, k, imageHeight, imageLength, imageWidth, &pVal, &qVal, &hh, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
tmpZFwd->hvl = hh;
tmpZFwd->pvl = pVal;
tmpZFwd->qvl = qVal;
//==============================================================================
}
//==============================================================================
}
}
else
{
//==============================================================================
// Sequential
for (l = 0; l < params.rand_points; l++)
{
k = generated_points[l].k;
i = generated_points[l].i;
j = generated_points[l].j;
//==============================================================================
// 2D to 1D representation for i, j
x = x_new(i, j, imageLength);
//==============================================================================
tmpDistances = &distances_calculated[l];
//==============================================================================
tmpXFwd = &xfwd_dist[l];
tmpYFwd = &yfwd_dist[l];
tmpZFwd = &zfwd_dist[l];
//==============================================================================
getDist = getDistance(edgeMovingPointer, imageHeight, imageLength, dim2D, k, x, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
destFixed = destPtr[k][x];
tmpDistances->distDifference = (dataType)((destFixed - getDist) * 2.0);
//==============================================================================
nbPointsX(edgeMovingPointer, h, x, k, i, imageHeight, imageLength, imageWidth, &pVal, &qVal, &hh, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
tmpXFwd->hvl = hh;
tmpXFwd->pvl = pVal;
tmpXFwd->qvl = qVal;
//==============================================================================
nbPointsY(edgeMovingPointer, h, k, i, j, imageHeight, imageLength, imageWidth, &pVal, &qVal, &hh, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
tmpYFwd->hvl = hh;
tmpYFwd->pvl = pVal;
tmpYFwd->qvl = qVal;
//==============================================================================
nbPointsZ(edgeMovingPointer, h, x, k, imageHeight, imageLength, imageWidth, &pVal, &qVal, &hh, bestFit, surface_points, ptsNum, params.insideShapevalue, params.parallelize);
tmpZFwd->hvl = hh;
tmpZFwd->pvl = pVal;
tmpZFwd->qvl = qVal;
//==============================================================================
}
//==============================================================================
}
}
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time recorded
distanceCalculateTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
Finite_Differences * tmpFwd;
dataType pv_tmp, qv_tmp, h_tmp;
// Calculate the finite differences
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
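// finiteDifAll is assumed to combine the two sampled neighbour values pvl and qvl
// with the local spacing hvl into a single difference quotient, e.g. (pvl - qvl) / hvl.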
if (!params.use_FSM)
{
for (l = 0; l < params.rand_points; l++)
{
tmpFwd = &fwd_vals[l];
//==============================================================================
pv_tmp = xfwd_dist[l].pvl;
qv_tmp = xfwd_dist[l].qvl;
h_tmp = xfwd_dist[l].hvl;
//==============================================================================
tmpFwd->xFwd = finiteDifAll(pv_tmp, qv_tmp, h_tmp);
//==============================================================================
pv_tmp = yfwd_dist[l].pvl;
qv_tmp = yfwd_dist[l].qvl;
h_tmp = yfwd_dist[l].hvl;
//==============================================================================
tmpFwd->yFwd = finiteDifAll(pv_tmp, qv_tmp, h_tmp);
//==============================================================================
pv_tmp = zfwd_dist[l].pvl;
qv_tmp = zfwd_dist[l].qvl;
h_tmp = zfwd_dist[l].hvl;
//==============================================================================
tmpFwd->zFwd = finiteDifAll(pv_tmp, qv_tmp, h_tmp);
//==============================================================================
}
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time recorded
finiteDiffereceTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
// Calculate the gradient components
#ifdef MEASURE_TIME
firstCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
#endif
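// Stochastic gradient descent step: for each sampled voxel the residual
// distDifference = 2 * (d_fixed - d_moving) is multiplied by the analytic derivative
// of the transformed distance map with respect to each affine parameter (chain rule
// through rotation, scaling and translation), and the weighted average over
// params.rand_points samples is added to the current parameter estimate.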
if (params.use_FSM)
{
l = 0;
do
{
// Generate random points
if (params.use_clipbox)
{
// From clipbox
k = bestFit.k_min + (rand() % (bestFit.k_max - bestFit.k_min + 1)), i = bestFit.i_min + (rand() % (bestFit.i_max - bestFit.i_min + 1)), j = bestFit.j_min + (rand() % (bestFit.j_max - bestFit.j_min + 1)); // Sample inside [min, max] of the clip box
}
else
{
// From whole domain
k = rand() % imageHeight, i = rand() % imageLength, j = rand() % imageWidth;
}
// 2D to 1D representation for i, j
x = x_new(i, j, imageLength);
// Check if inside the narrow band
if (params.binary_nband)
{
if (NFunctionBinary(fixedNBandPtr[k][x], movingNBandPtr[k][x], params.imageForeground) == 1) // Checks if inside the narrow band areas for randomly generated points
{
// Start loop
l = l + 1;
//==============================================================================
// Store the distance function difference
distDifference = (dataType)((destPtr[k][x] - distTransPtr[k][x]) * 2.0);
//==============================================================================
// Directional component vector derivatives - i, j, k
dataType tmpI = i / (dataType)imageLength, tmpJ = j / (dataType)imageWidth, tmpK = k / (dataType)imageHeight;
//==============================================================================
// Apply Forward Differences to the distTrans pointer
xFwd = finiteDifX(distTransPtr, h, x, k, i, imageLength);
yFwd = finiteDifY(distTransPtr, h, k, i, j, imageLength, imageWidth);
zFwd = finiteDifZ(distTransPtr, h, x, k, i, imageLength, imageHeight);
//==============================================================================
// Evaluate Individual Gradient Components
//==============================================================================
#ifdef DIRECTIONAL
// Set the Rotation - Directional
//==============================================================================
componentX = yFwd * (((tmpI)*((_sin_phi_neg_sin_psi + _cos_phi_psi_sin_theta) / sy)) + ((tmpJ)*((_cos_psi_neg_sin_phi - _cos_phi_sin_psi_theta) / sy)) + ((-tmpK)*((_cos_phi_theta) / sy))) +
zFwd * (((tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_theta) / sz)) + ((tmpJ)*((_cos_phi_psi - _sin_phi_psi_theta) / sz)) + ((-tmpK)*((_cos_theta_sin_phi) / sz)));
//==============================================================================
componentY = xFwd * (((-tmpI)*((_cos_psi_sin_theta) / sx)) + ((tmpJ)*((_sin_psi_theta) / sx)) + ((tmpK)*((_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_psi_theta_sin_phi) / sy)) + ((-tmpJ)*((_cos_theta_sin_phi_psi) / sy)) + ((tmpK)*((_sin_phi_theta) / sy))) +
zFwd * (((-tmpI)*((_cos_phi_theta_psi) / sz)) + ((tmpJ)*((_cos_phi_theta_sin_psi) / sz)) + ((-tmpK)*((_cos_phi_sin_theta) / sz)));
//==============================================================================
componentZ = xFwd * (((-tmpI)*((_cos_theta_sin_psi) / sx)) + ((-tmpJ)*((_cos_psi_theta) / sx))) +
yFwd * (((tmpI)*((_cos_phi_psi - _sin_phi_psi_theta) / sy)) + ((tmpJ)*((_cos_phi_neg_sin_psi - _cos_psi_sin_phi_theta) / sy))) +
zFwd * (((tmpI)*((_cos_psi_sin_phi + _cos_phi_sin_psi_theta) / sz)) + ((tmpJ)*((_sin_phi_neg_sin_psi + _cos_phi_psi_sin_theta) / sz)));
//==============================================================================
// Set the Rotations - Directional
affineResult.rotation.x += (params.rotation_weight * (step_size)*(componentX)*(distDifference)) / params.rand_points;
affineResult.rotation.y += (params.rotation_weight * (step_size)*(componentY)*(distDifference)) / params.rand_points;
affineResult.rotation.z += (params.rotation_weight * (step_size)*(componentZ)*(distDifference)) / params.rand_points;
//==============================================================================
// Set the scales - Directional
componentX = xFwd * ((-tmpI)*((_cos_psi_theta) / (sx*sx)) + ((tmpJ)*((_cos_theta_sin_psi) / (sx*sx))) + ((-tmpK)*((_sin_theta) / (sx*sx))));
//==============================================================================
componentY = yFwd * (((-tmpI)*((_cos_phi_sin_psi + _cos_psi__sin_phi_theta) / (sy*sy))) + ((-tmpJ)*((_cos_phi_psi - _sin_phi_psi_theta) / (sy*sy))) + ((tmpK)*((_cos_theta_sin_phi) / (sy*sy))));
//==============================================================================
componentZ = zFwd * (((-tmpI)*((_sin_phi_psi - _cos_phi_psi_sin_theta) / (sz*sz))) + ((-tmpJ)*((_cos_psi_sin_phi + _cos_phi_sin_psi_theta) / (sz*sz))) + ((-tmpK)*((_cos_phi_theta) / (sz*sz))));
//==============================================================================
// Set the Scales - Directional Scales
affineResult.scaling.x += (params.scaling_weight * (step_size)*(componentX)*(distDifference)) / params.rand_points;
affineResult.scaling.y += (params.scaling_weight * (step_size)*(componentY)*(distDifference)) / params.rand_points;
affineResult.scaling.z += (params.scaling_weight * (step_size)*(componentZ)*(distDifference)) / params.rand_points;
#endif // DIRECTIONAL
//==============================================================================
// Translation Parameters - Always DIRECTIONAL
// Tx
affineResult.translation.x += (params.translation_weight * step_size*(-xFwd)*(distDifference)) / params.rand_points;
// Ty
affineResult.translation.y += (params.translation_weight * step_size*(-yFwd)*(distDifference)) / params.rand_points;
// Tz
affineResult.translation.z += (params.translation_weight * step_size*(-zFwd)*(distDifference)) / params.rand_points;
//==============================================================================
// Check condition
if (l == params.rand_points)
{
loop = false;
}
}
}
else
{
if (NFunction(destPtr[k][x], distTransPtr[k][x], NDelta) == 1) // Checks if inside the band for randomly generated points
//if (NFunctionBinary(fixedNBandPtr[k][x], movingNBandPtr[k][x], foreground) == 1) // Checks if inside the narrow band areas for randomly generated points
{
// Start loop
l = l + 1;
//==============================================================================
// Store the distance function difference
distDifference = (dataType)((destPtr[k][x] - distTransPtr[k][x]) * 2.0);
//==============================================================================
// Directional component vector derivatives - i, j, k
dataType tmpI = i / (dataType)imageLength, tmpJ = j / (dataType)imageWidth, tmpK = k / (dataType)imageHeight;
//==============================================================================
// Apply Forward Differences to the distTrans pointer
xFwd = finiteDifX(distTransPtr, h, x, k, i, imageLength);
yFwd = finiteDifY(distTransPtr, h, k, i, j, imageLength, imageWidth);
zFwd = finiteDifZ(distTransPtr, h, x, k, i, imageLength, imageHeight);
//==============================================================================
// Evaluate Individual Gradient Components
//==============================================================================
#ifdef DIRECTIONAL
// Set the Rotation - Directional
//==============================================================================
componentX = yFwd * (((tmpI)*((_sin_phi_neg_sin_psi + _cos_phi_psi_sin_theta) / sy)) + ((tmpJ)*((_cos_psi_neg_sin_phi - _cos_phi_sin_psi_theta) / sy)) + ((-tmpK)*((_cos_phi_theta) / sy))) +
zFwd * (((tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_theta) / sz)) + ((tmpJ)*((_cos_phi_psi - _sin_phi_psi_theta) / sz)) + ((-tmpK)*((_cos_theta_sin_phi) / sz)));
//==============================================================================
componentY = xFwd * (((-tmpI)*((_cos_psi_sin_theta) / sx)) + ((tmpJ)*((_sin_psi_theta) / sx)) + ((tmpK)*((_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_psi_theta_sin_phi) / sy)) + ((-tmpJ)*((_cos_theta_sin_phi_psi) / sy)) + ((tmpK)*((_sin_phi_theta) / sy))) +
zFwd * (((-tmpI)*((_cos_phi_theta_psi) / sz)) + ((tmpJ)*((_cos_phi_theta_sin_psi) / sz)) + ((-tmpK)*((_cos_phi_sin_theta) / sz)));
//==============================================================================
componentZ = xFwd * (((-tmpI)*((_cos_theta_sin_psi) / sx)) + ((-tmpJ)*((_cos_psi_theta) / sx))) +
yFwd * (((tmpI)*((_cos_phi_psi - _sin_phi_psi_theta) / sy)) + ((tmpJ)*((_cos_phi_neg_sin_psi - _cos_psi_sin_phi_theta) / sy))) +
zFwd * (((tmpI)*((_cos_psi_sin_phi + _cos_phi_sin_psi_theta) / sz)) + ((tmpJ)*((_sin_phi_neg_sin_psi + _cos_phi_psi_sin_theta) / sz)));
//==============================================================================
// Set the Rotations - Directional
affineResult.rotation.x += (params.rotation_weight * (step_size)*(componentX)*(distDifference)) / params.rand_points;
affineResult.rotation.y += (params.rotation_weight * (step_size)*(componentY)*(distDifference)) / params.rand_points;
affineResult.rotation.z += (params.rotation_weight * (step_size)*(componentZ)*(distDifference)) / params.rand_points;
//==============================================================================
// Set the scales - Directional
componentX = xFwd * ((-tmpI)*((_cos_psi_theta) / (sx*sx)) + ((tmpJ)*((_cos_theta_sin_psi) / (sx*sx))) + ((-tmpK)*((_sin_theta) / (sx*sx))));
//==============================================================================
componentY = yFwd * (((-tmpI)*((_cos_phi_sin_psi + _cos_psi__sin_phi_theta) / (sy*sy))) + ((-tmpJ)*((_cos_phi_psi - _sin_phi_psi_theta) / (sy*sy))) + ((tmpK)*((_cos_theta_sin_phi) / (sy*sy))));
//==============================================================================
componentZ = zFwd * (((-tmpI)*((_sin_phi_psi - _cos_phi_psi_sin_theta) / (sz*sz))) + ((-tmpJ)*((_cos_psi_sin_phi + _cos_phi_sin_psi_theta) / (sz*sz))) + ((-tmpK)*((_cos_phi_theta) / (sz*sz))));
//==============================================================================
// Set the Scales - Directional Scales
affineResult.scaling.x += (params.scaling_weight * (step_size)*(componentX)*(distDifference)) / params.rand_points;
affineResult.scaling.y += (params.scaling_weight * (step_size)*(componentY)*(distDifference)) / params.rand_points;
affineResult.scaling.z += (params.scaling_weight * (step_size)*(componentZ)*(distDifference)) / params.rand_points;
#endif // DIRECTIONAL
//==============================================================================
// Translation Parameters - Always DIRECTIONAL
// Tx
affineResult.translation.x += (params.translation_weight * step_size*(-xFwd)*(distDifference)) / params.rand_points;
// Ty
affineResult.translation.y += (params.translation_weight * step_size*(-yFwd)*(distDifference)) / params.rand_points;
// Tz
affineResult.translation.z += (params.translation_weight * step_size*(-zFwd)*(distDifference)) / params.rand_points;
//==============================================================================
// Check condition
if (l == params.rand_points)
{
loop = false;
}
}
}
} while ((loop) && (l <= params.rand_points));
}
else
{
for (l = 0; l < params.rand_points; l++)
{
//==============================================================================
yFwd = fwd_vals[l].yFwd;
xFwd = fwd_vals[l].xFwd;
zFwd = fwd_vals[l].zFwd;
//==============================================================================
distDifference = distances_calculated[l].distDifference;
//==============================================================================
k = generated_points[l].k;
i = generated_points[l].i;
j = generated_points[l].j;
//==============================================================================
// Directional component vector derivatives - i, j, k
dataType tmpI = i / (dataType)imageLength, tmpJ = j / (dataType)imageWidth, tmpK = k / (dataType)imageHeight;
//==============================================================================
// Shorten the radian calculation
//==============================================================================
#ifdef DIRECTIONAL
// Set the Rotation - Directional
//==============================================================================
componentX = yFwd * (((tmpI)*((_sin_phi_neg_sin_psi + _cos_phi_psi_sin_theta) / sy)) + ((tmpJ)*((_cos_psi_neg_sin_phi - _cos_phi_sin_psi_theta) / sy)) + ((-tmpK)*((_cos_phi_theta) / sy))) +
zFwd * (((tmpI)*((_cos_phi_sin_psi + _cos_psi_sin_phi_theta) / sz)) + ((tmpJ)*((_cos_phi_psi - _sin_phi_psi_theta) / sz)) + ((-tmpK)*((_cos_theta_sin_phi) / sz)));
//==============================================================================
componentY = xFwd * (((-tmpI)*((_cos_psi_sin_theta) / sx)) + ((tmpJ)*((_sin_psi_theta) / sx)) + ((tmpK)*((_cos_theta) / sx))) +
yFwd * (((tmpI)*((_cos_psi_theta_sin_phi) / sy)) + ((-tmpJ)*((_cos_theta_sin_phi_psi) / sy)) + ((tmpK)*((_sin_phi_theta) / sy))) +
zFwd * (((-tmpI)*((_cos_phi_theta_psi) / sz)) + ((tmpJ)*((_cos_phi_theta_sin_psi) / sz)) + ((-tmpK)*((_cos_phi_sin_theta) / sz)));
//==============================================================================
componentZ = xFwd * (((-tmpI)*((_cos_theta_sin_psi) / sx)) + ((-tmpJ)*((_cos_psi_theta) / sx))) +
yFwd * (((tmpI)*((_cos_phi_psi - _sin_phi_psi_theta) / sy)) + ((tmpJ)*((_cos_phi_neg_sin_psi - _cos_psi_sin_phi_theta) / sy))) +
zFwd * (((tmpI)*((_cos_psi_sin_phi + _cos_phi_sin_psi_theta) / sz)) + ((tmpJ)*((_sin_phi_neg_sin_psi + _cos_phi_psi_sin_theta) / sz)));
//==============================================================================
// Set the Rotations - Directional
affineResult.rotation.x += (params.rotation_weight * (step_size)*(componentX)*(distDifference)) / params.rand_points;
affineResult.rotation.y += (params.rotation_weight * (step_size)*(componentY)*(distDifference)) / params.rand_points;
affineResult.rotation.z += (params.rotation_weight * (step_size)*(componentZ)*(distDifference)) / params.rand_points;
//==============================================================================
componentX = xFwd * ((-tmpI)*((_cos_psi_theta) / (sx*sx)) + ((tmpJ)*((_cos_theta_sin_psi) / (sx*sx))) + ((-tmpK)*((_sin_theta) / (sx*sx))));
//==============================================================================
componentY = yFwd * (((-tmpI)*((_cos_phi_sin_psi + _cos_psi__sin_phi_theta) / (sy*sy))) + ((-tmpJ)*((_cos_phi_psi - _sin_phi_psi_theta) / (sy*sy))) + ((tmpK)*((_cos_theta_sin_phi) / (sy*sy))));
//==============================================================================
componentZ = zFwd * (((-tmpI)*((_sin_phi_psi - _cos_phi_psi_sin_theta) / (sz*sz))) + ((-tmpJ)*((_cos_psi_sin_phi + _cos_phi_sin_psi_theta) / (sz*sz))) + ((-tmpK)*((_cos_phi_theta) / (sz*sz))));
//==============================================================================
// Set the Scales - Directional Scales
affineResult.scaling.x += (params.scaling_weight * (step_size)*(componentX)*(distDifference)) / params.rand_points;
affineResult.scaling.y += (params.scaling_weight * (step_size)*(componentY)*(distDifference)) / params.rand_points;
affineResult.scaling.z += (params.scaling_weight * (step_size)*(componentZ)*(distDifference)) / params.rand_points;
#endif // DIRECTIONAL
//==============================================================================
// Translation Parameters - Always DIRECTIONAL
// Tx
affineResult.translation.x += (params.translation_weight * step_size*(-xFwd)*(distDifference)) / params.rand_points;
// Ty
affineResult.translation.y += (params.translation_weight * step_size*(-yFwd)*(distDifference)) / params.rand_points;
// Tz
affineResult.translation.z += (params.translation_weight * step_size*(-zFwd)*(distDifference)) / params.rand_points;
}
}
//==============================================================================
#ifdef MEASURE_TIME
secondCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store the time
gradientTotalCpuTime += secondCpuTime - firstCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("SGD Components calc. CPU time at iteration %4d: %e secs\n\n", iteration, secondCpuTime - firstCpuTime);
#endif
//==============================================================================
// Increase Iteration
iteration++;
//==============================================================================
}
}
//==============================================================================
// Stop Timing the Registration Process
#ifdef MEASURE_TIME
regStopCpuTime = clock() / (dataType)(CLOCKS_PER_SEC);
// Store Time For Each Registration run
regTotalCpuTimen = regStopCpuTime - regStartCpuTime;
#endif
//==============================================================================
#ifdef CONSOLE_OUTPUT
printf("Total Registration Function calc. CPU Time is: %e secs\n\n", regTotalCpuTimen);
#endif
//==============================================================================
return affineResult;
//==============================================================================
}
//==============================================================================
ClipBox findClipBoxSingle(dataType ** Source, size_t imageHeight, size_t imageLength, size_t imageWidth)
{
ClipBox coord;
coord.k_min = imageHeight, coord.i_min = imageLength, coord.j_min = imageWidth, coord.k_max = 0, coord.i_max = 0, coord.j_max = 0;
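// Mins start at the largest valid index and maxes at 0, so the first voxel found
// inside the band initializes the box and later voxels only widen it.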
size_t k, i, j;
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
size_t x = x_new(i, j, imageLength);
if (NFunctionOne(Source[k][x], NDelta) == 1)
{
// Find K clip
if (k > coord.k_max)
{
coord.k_max = k;
}
if (k < coord.k_min)
{
coord.k_min = k;
}
// Find I clip
if (i > coord.i_max)
{
coord.i_max = i;
}
if (i < coord.i_min)
{
coord.i_min = i;
}
// Find J clip
if (j > coord.j_max)
{
coord.j_max = j;
}
if (j < coord.j_min)
{
coord.j_min = j;
}
}
}
}
}
return coord;
}
//==============================================================================
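// Labels every voxel of bandContainer with insideValue when the corresponding
// distance value lies inside the narrow band (NFunctionOne) and with outsideValue
// otherwise.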
void fillNarrowBandArea(dataType ** sourceDist, dataType ** bandContainer, size_t imageHeight, size_t imageLength, size_t imageWidth, dataType insideValue, dataType outsideValue)
{
size_t k, i, j;
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
size_t xd = x_new(i, j, imageLength);
if (NFunctionOne(sourceDist[k][xd], NDelta) == 1)
{
bandContainer[k][xd] = insideValue;
}
else
{
bandContainer[k][xd] = outsideValue;
}
}
}
}
}
//==============================================================================
ClipBox findClipBoxTwo(dataType ** destination, dataType ** source, size_t imageHeight, size_t imageLength, size_t imageWidth)
{
ClipBox coord;
coord.k_min = imageHeight, coord.i_min = imageLength, coord.j_min = imageWidth, coord.k_max = 0, coord.i_max = 0, coord.j_max = 0;
size_t k, i, j;
for (k = 0; k < imageHeight; k++)
{
for (i = 0; i < imageLength; i++)
{
for (j = 0; j < imageWidth; j++)
{
// 2D to 1D representation for i, j
size_t x = x_new(i, j, imageLength);
if (NFunction(destination[k][x], source[k][x], NDelta) == 1)
{
// Find K clip
if (k > coord.k_max)
{
coord.k_max = k;
}
if (k < coord.k_min) // Not else-if: the first matching voxel must be able to set both min and max
{
coord.k_min = k;
}
// Find I clip
if (i > coord.i_max)
{
coord.i_max = i;
}
if (i < coord.i_min)
{
coord.i_min = i;
}
// Find J clip
if (j > coord.j_max)
{
coord.j_max = j;
}
if (j < coord.j_min)
{
coord.j_min = j;
}
}
}
}
}
return coord;
}
//==============================================================================
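// Transforms the eight corners of the clip box with the current affine parameters and
// rebuilds the box as the componentwise min/max over the transformed corners, i.e.
// the axis-aligned bounding box of the transformed cuboid.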
void transformClip(ClipBox *bestfit, Point3D translation, Point3D scaling, Point3D rotation, dataType centroid[3], size_t imageHeight, size_t imageLength, size_t imageWidth)
{
//==============================================================================
size_t k_min = (*bestfit).k_min, k_max = (*bestfit).k_max;
size_t i_min = (*bestfit).i_min, i_max = (*bestfit).i_max;
size_t j_min = (*bestfit).j_min, j_max = (*bestfit).j_max;
//==============================================================================
// Floor Points - k min
// i_min, j_min, k_min; // Corner 1
// i_max, j_min, k_min; // Corner 2
// i_min, j_max, k_min; // Corner 3
// i_max, j_max, k_min; // Corner 4
//==============================================================================
// Ceiling Points - k max
// i_min, j_min, k_max; // Corner 5
// i_max, j_min, k_max; // Corner 6
// i_min, j_max, k_max; // Corner 7
// i_max, j_max, k_max; // Corner 8
//==============================================================================
// Transform the bottom corner points
// Corner 1
CoordPoints c1 = { k_min, i_min, j_min };
c1 = transformPoint(&c1, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 1);
CoordPoints c2 = { k_min, i_max, j_min };
c2 = transformPoint(&c2, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 1);
CoordPoints c3 = { k_min, i_min, j_max };
c3 = transformPoint(&c3, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 1);
CoordPoints c4 = { k_min, i_max, j_max };
c4 = transformPoint(&c4, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 1);
//==============================================================================
// Transform the top corner points
// Corner 5
CoordPoints c5 = { k_max, i_min, j_min };
c5 = transformPoint(&c5, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 2);
CoordPoints c6 = { k_max, i_max, j_min };
c6 = transformPoint(&c6, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 2);
CoordPoints c7 = { k_max, i_min, j_max };
c7 = transformPoint(&c7, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 2);
CoordPoints c8 = { k_max, i_max, j_max };
c8 = transformPoint(&c8, translation, scaling, rotation, centroid, imageHeight, imageLength, imageWidth, 2);
//==============================================================================
size_t min_ab, min_cd, min_ef, min_gh, min_1, min_2;
// K min
min_ab = c1.k < c2.k ? c1.k : c2.k; // c1 vs c2
min_cd = c3.k < c4.k ? c3.k : c4.k; // c3 vs c4
min_1 = min_ab < min_cd ? min_ab : min_cd; // min. of cube floor point k
min_ef = c5.k < c6.k ? c5.k : c6.k; // c5 vs c6
min_gh = c7.k < c8.k ? c7.k : c8.k; // c7 vs c8
min_2 = min_ef < min_gh ? min_ef : min_gh; // min. of cube ceiling point k
(*bestfit).k_min = min_1 < min_2 ? min_1 : min_2; // min. k in all 8 corners
// I min
min_ab = c1.i < c2.i ? c1.i : c2.i; // c1 vs c2
min_cd = c3.i < c4.i ? c3.i : c4.i; // c3 vs c4
min_1 = min_ab < min_cd ? min_ab : min_cd; // min. of cube floor point i
min_ef = c5.i < c6.i ? c5.i : c6.i; // c5 vs c6
min_gh = c7.i < c8.i ? c7.i : c8.i; // c7 vs c8
min_2 = min_ef < min_gh ? min_ef : min_gh; // min. of cube ceiling point i
(*bestfit).i_min = min_1 < min_2 ? min_1 : min_2; // min. i in all 8 corners
// J min
min_ab = c1.j < c2.j ? c1.j : c2.j; // c1 vs c2
min_cd = c3.j < c4.j ? c3.j : c4.j; // c3 vs c4
min_1 = min_ab < min_cd ? min_ab : min_cd; // min. of cube floor point j
min_ef = c5.j < c6.j ? c5.j : c6.j; // c5 vs c6
min_gh = c7.j < c8.j ? c7.j : c8.j; // c7 vs c8
min_2 = min_ef < min_gh ? min_ef : min_gh; // min. of cube ceiling point j
(*bestfit).j_min = min_1 < min_2 ? min_1 : min_2; // min. j in all 8 corners
//==============================================================================
size_t max_ab, max_cd, max_ef, max_gh, max_1, max_2;
// Max End Points
// K max
max_ab = c1.k > c2.k ? c1.k : c2.k; // c1 vs c2
max_cd = c3.k > c4.k ? c3.k : c4.k; // c3 vs c4
max_1 = max_ab > max_cd ? max_ab : max_cd; // max. of cube floor point k
max_ef = c5.k > c6.k ? c5.k : c6.k; // c5 vs c6
max_gh = c7.k > c8.k ? c7.k : c8.k; // c7 vs c8
max_2 = max_ef > max_gh ? max_ef : max_gh; // max. of cube ceiling point k
(*bestfit).k_max = max_1 > max_2 ? max_1 : max_2; // max k in all 8 corners
// I max
max_ab = c1.i > c2.i ? c1.i : c2.i; // c1 vs c2
max_cd = c3.i > c4.i ? c3.i : c4.i; // c3 vs c4
max_1 = max_ab > max_cd ? max_ab : max_cd; // max. of cube floor point i
max_ef = c5.i > c6.i ? c5.i : c6.i; // c5 vs c6
max_gh = c7.i > c8.i ? c7.i : c8.i; // c7 vs c8
max_2 = max_ef > max_gh ? max_ef : max_gh; // max. of cube ceiling point i
(*bestfit).i_max = max_1 > max_2 ? max_1 : max_2; // max i in all 8 corners
// J max
max_ab = c1.j > c2.j ? c1.j : c2.j; // c1 vs c2
max_cd = c3.j > c4.j ? c3.j : c4.j; // c3 vs c4
max_1 = max_ab > max_cd ? max_ab : max_cd; // max. of cube floor point j
max_ef = c5.j > c6.j ? c5.j : c6.j; // c5 vs c6
max_gh = c7.j > c8.j ? c7.j : c8.j; // c7 vs c8
max_2 = max_ef > max_gh ? max_ef : max_gh; // max. of cube ceiling point j
(*bestfit).j_max = max_1 > max_2 ? max_1 : max_2; // max j in all 8 corners
//==============================================================================
}
//==============================================================================
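// Transforms a single voxel coordinate: shift to the centroid, divide by the scale
// factors, rotate, shift back, then subtract the translation. loc == 1 floors the
// result (lower box corner), loc == 2 ceils it (upper box corner); both clamp the
// indices to the image bounds.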
CoordPoints transformPoint(CoordPoints * inputPoints, Point3D translation, Point3D scaling, Point3D rotation, dataType centroid[3], size_t imageHeight, size_t imageLength, size_t imageWidth, int loc)
{
//==============================================================================
size_t bottom, left, begin;
//==============================================================================
dataType k_a, i_a, j_a; // Affine indices
// Transformed
dataType k_t, i_t, j_t; // Transformed indices
// Temporary parameters
dataType tmpX, tmpY, tmpZ;
dataType cz = centroid[2], cx = centroid[0], cy = centroid[1];
dataType theta = (rotation.y), psi = (rotation.z), phi = (rotation.x);
//==============================================================================
size_t k = (*inputPoints).k, i = (*inputPoints).i, j = (*inputPoints).j;
//==============================================================================
// 1. Move to the origin (centroid)
k_a = k - cz; // Move to origin Z
i_a = i - cx; // Move to origin x
j_a = j - cy; // Move to origin Y
//==============================================================================
// Apply scaling
tmpZ = k_a / scaling.z;
tmpX = i_a / scaling.x;
tmpY = j_a / scaling.y;
//==============================================================================
// Apply Rotation
i_t = x_rotate(tmpZ, tmpX, tmpY, theta, psi);
j_t = y_rotate(tmpZ, tmpX, tmpY, theta, psi, phi);
k_t = z_rotate(tmpZ, tmpX, tmpY, theta, psi, phi);
//==============================================================================
// Move back to centroid
tmpX = i_t + cx;
tmpY = j_t + cy;
tmpZ = k_t + cz;
//==============================================================================
// Set the values
i_t = tmpX;
j_t = tmpY;
k_t = tmpZ;
//==============================================================================
// Apply the translation (subtracted here, matching the backward mapping used above)
i_t = i_t - translation.x;
j_t = j_t - translation.y;
k_t = k_t - translation.z;
if (loc == 1) // Lower
{
// Clamp negative coordinates in floating point before the unsigned cast
// (casting a negative value to size_t wraps around)
k_t = max(k_t, 0);
i_t = max(i_t, 0);
j_t = max(j_t, 0);
bottom = (size_t)floor(k_t);
bottom = min(bottom, imageHeight - 1);
(*inputPoints).k = bottom;
// X
left = (size_t)floor(i_t);
left = min(left, imageLength - 1);
(*inputPoints).i = left;
// Y
begin = (size_t)floor(j_t);
begin = min(begin, imageWidth - 1);
(*inputPoints).j = begin;
}
else if (loc == 2) // Upper
{
k_t = max(k_t, 0);
i_t = max(i_t, 0);
j_t = max(j_t, 0);
bottom = (size_t)ceil(k_t);
bottom = min(bottom, imageHeight - 1);
(*inputPoints).k = bottom;
// X
left = (size_t)ceil(i_t);
left = min(left, imageLength - 1);
(*inputPoints).i = left;
// Y
begin = (size_t)ceil(j_t);
begin = min(begin, imageWidth - 1);
(*inputPoints).j = begin;
}
return *inputPoints;
}
//==============================================================================
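// Brute-force distance from voxel (k1, x1) to the nearest collected surface point;
// squared distances are compared and the square root is taken once at the end. The
// parallel branch keeps a per-thread running minimum in a padded row of
// distances[NUM_THREADS][PAD] (padding avoids false sharing) and reduces across
// threads afterwards. Voxels inside the shape return distance 0.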
dataType getDistance(dataType ** binaryImage, size_t imageHeight, size_t imageLength, size_t dim2D, const size_t k1, const size_t x1, ClipBox bestfitBox, Point3D * surface_points, size_t ptsNum, dataType insideShapevalue, bool parallelize)
{
dataType pv = 10000;
//==============================================================================
dataType dx, dy, dz, dist;
//==============================================================================
dataType tmpX, tmpY, tmpZ;
//==============================================================================
tmpZ = (dataType)k1;
//==============================================================================
tmpX = (dataType)(x1 / imageLength);
//==============================================================================
tmpY = (dataType)(x1 % imageLength);
//==============================================================================
size_t i; // Same type as ptsNum to avoid a signed/unsigned comparison
//==============================================================================
if (!parallelize) // Run sequential code
{
//==============================================================================
// Sequential
for (i = 0; i < ptsNum; i++)
{
//==============================================================================
dz = tmpZ - surface_points[i].z; // difference between z axes of both images
dx = tmpX - surface_points[i].x;// difference between x axes of both images
dy = tmpY - surface_points[i].y;// difference between y axes of both images
dist = dx * dx + dy * dy + dz * dz;
//==============================================================================
if (dist <= pv)
{
pv = dist;
}
//==============================================================================
}
//==============================================================================
}
else // Run parallelized
{
//==============================================================================
// OpenMp
int nthreads;
dataType distances[NUM_THREADS][PAD];
omp_set_dynamic(0); // Disable dynamic adjustment of threads
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
{
int i, id, nthrds;
dataType dx, dy, dz, dist, distPv;
id = omp_get_thread_num();
nthrds = omp_get_num_threads();
if (id == 0) { nthreads = nthrds; }
for (i = id, distPv = 10000; i < ptsNum; i = i + nthrds)
{
//==============================================================================
//printf("Running on thread %d\n", id);
//==============================================================================
dz = tmpZ - surface_points[i].z; // difference between z axes of both images
dx = tmpX - surface_points[i].x;// difference between x axes of both images
dy = tmpY - surface_points[i].y;// difference between y axes of both images
dist = dx * dx + dy * dy + dz * dz;
//==============================================================================
//distPv = min(distPv, dist);
if (dist <= distPv)
{
distPv = dist;
}
//==============================================================================
//distances[id][0] = distPv;
//==============================================================================
}
distances[id][0] = distPv;
//==============================================================================
}
for (size_t i = 0; i < nthreads; i++)
{
if (pv >= distances[i][0])
{
pv = distances[i][0];
}
}
}
//==============================================================================
pv = (dataType)sqrt(pv);
//==============================================================================
// Check if the point is inside
if (binaryImage[k1][x1] == insideShapevalue)
{
// pv = -1 * pv;
pv = 0;
}
//==============================================================================
return pv;
//==============================================================================
}
//==============================================================================
// Returns points for the surface/object
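// Counting pass only: returns how many foreground (edge) voxels lie inside the clip
// box, e.g. to size the surface point buffer before the coordinates are collected.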
size_t surfacePoints(dataType ** binaryImage, size_t imageLength, const unsigned char fgroundValue, ClipBox bestfitBox)
{
//==============================================================================
size_t k_2, i_2;//loop counter for z dimension
//==============================================================================
size_t ptsNum = 0;
//==============================================================================
// In the clip box volume
size_t k_min = bestfitBox.k_min, k_max = bestfitBox.k_max + 1, i_min = bestfitBox.i_min, i_max = bestfitBox.i_max, j_min = bestfitBox.j_min, j_max = bestfitBox.j_max;
size_t i_2_max = x_new(i_max, j_max, imageLength), i_2_min = x_new(i_min, j_min, imageLength);
// Find the surface points only within the clip box
for (k_2 = k_min; k_2 < k_max; k_2++) // z axis of the input surface or image
{
for (i_2 = i_2_min; i_2 < i_2_max; i_2++)// x-y axis of the input surface or image
{
if (binaryImage[k_2][i_2] == fgroundValue)
{
ptsNum++;
}
}
}
return ptsNum;
}
//==============================================================================
// Converts unsigned to signed distance map
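// Voxels belonging to the original foreground object get their distance negated,
// giving the usual signed-distance convention (negative inside, positive outside).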
void converToSignedDist(dataType ** distFn, dataType ** originalData, size_t imageHeight, size_t imageLength, size_t imageWidth, const unsigned char fgroundValue)
{
for (size_t k = 0; k < imageHeight; k++)
{
for (size_t i = 0; i < imageLength; i++)
{
for (size_t j = 0; j < imageWidth; j++)
{
size_t xd = x_new(i, j, imageLength);
if (originalData[k][xd] == fgroundValue)
{
distFn[k][xd] = -1 * distFn[k][xd];
}
}
}
}
}
//==============================================================================
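// Mean-square error between two maps, scaled by the grid spacing:
// error = (1 / (2 * count)) * sum(((a - b) * h_val)^2) over all voxels.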
dataType errorCalc(dataType ** aPtr, dataType ** bPtr, size_t height, size_t length, size_t width, dataType h_val)
{
dataType error = 0.0, tmp;
size_t i, j, k, xd, count = 0;
for (k = 0; k < height; k++)
{
for (i = 0; i < length; i++)
{
for (j = 0; j < width; j++)
{
// 1D Conversion of row and column
xd = x_new(i, j, length);
// Error calculation
tmp = (aPtr[k][xd] - bPtr[k][xd]) * h_val;
error += (tmp*tmp);
count++;
}
}
}
// Mean Square Error
if (count == 0)
{
count = 1;
}
error = (dataType) ((error) / (2. * count));
//return sqrtf(error); // Root Mean Square Error
return error; // Mean Square Error
}
//==============================================================================
|
MM.c
|
#include<stdlib.h>
#include<stdio.h>
#include<omp.h>
typedef unsigned int myentry;
typedef unsigned int myindex;
// Row-major multiply of an N x M matrix A with an M x K matrix B into the N x K
// result C (row strides: M for A, K for B and C); each thread owns whole rows of C,
// so the parallel loop is race-free.
void multMatrices(int N, int M, int K, myentry *A, myentry *B, myentry *C) {
#pragma omp parallel for
for(myindex i = 0; i < N; i++){
for(myindex j = 0; j < M; j++){
myentry x = A[i*M+j];
for(myindex k = 0; k < K; k++)
C[i*K+k] += x * B[j*K+k];
}
}
}
int x = 0;
int nextPR() {
x = (x+234532)*((x>> 5 )+12234);
return x & 16383;
}
long int hash(long int a, long int b) { return (a | a<<27)*(b+2352351);}
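// nextPR is a small deterministic pseudo-random generator (seeded from argv[2]) and
// hash folds the result matrix into a single checksum, so runs with the same N and
// seed print a reproducible, comparable value.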
int main(int argc, char **argv){
if(argc != 3) {
printf("Usage: mult N seed\n");
exit(1);
}
omp_set_num_threads(4);
myindex N = atoi(argv[1]);
x = atoi(argv[2]);
myentry *A = malloc( N*N*sizeof(myentry));
myentry *B = malloc( N*N*sizeof(myentry));
myentry *C = calloc( (size_t)N*N, sizeof(myentry)); // Zero-initialize: C is accumulated into with +=
if( A == NULL || B==NULL || C==NULL ) {
printf("Could not allocate memory");
exit(2);
}
for(int i=0; i< N; i++) {
myindex row = i*N;
for(int col=0; col<N; col++) {
myindex index = row+col;
A[index] = nextPR();
B[index] = nextPR();
}
}
multMatrices(N,N,N, A,B,C);
int h = atoi(argv[2]);
for(int k=0;k<3;k++)
for(int i=0; i< N*N; i++) {
// printf("%f ", C[i]);
h = hash(h, (long int) C[i]);
}
printf( "%d\n", h & 1023);
return 0;
}
|
omp.h
|
#ifndef PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
#define PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
#if defined(PARLAY_OPENMP)
#include <omp.h>
namespace parlay {
// IWYU pragma: private, include "../../parallel.h"
inline size_t num_workers() { return omp_get_max_threads(); }
inline size_t worker_id() { return omp_get_thread_num(); }
template <class F>
inline void parallel_for(size_t start, size_t end, F f, long, bool) {
_Pragma("omp parallel for")
for(size_t i=start; i<end; i++) f(i);
}
bool in_par_do = false;
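// in_par_do guards against nested parallel regions: the first (top-level) call opens
// one parallel/single region and forks the two sides as OpenMP tasks; calls made from
// inside that region only spawn tasks and wait on them, reusing the existing team.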
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool) {
if (!in_par_do) {
in_par_do = true; // at top level start up tasking
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task
{
left();
}
#pragma omp task
{
right();
}
}
}
#pragma omp taskwait
in_par_do = false;
} else { // already started
#pragma omp task
left();
#pragma omp task
right();
#pragma omp taskwait
}
}
} // namespace parlay
#endif
#endif // PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
|
imag_self_energy_with_g.c
|
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include "lagrid.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "imag_self_energy_with_g.h"
#include "triplet.h"
static long ise_set_g_pos_frequency_point(long (*g_pos)[4],
const long num_band0,
const long num_band,
const char *g_zero);
static void
detailed_imag_self_energy_at_triplet(double *detailed_imag_self_energy,
double *imag_self_energy,
const long num_band0,
const long num_band,
const double *fc3_normal_squared,
const double *frequencies,
const long triplet[3],
const double *g1,
const double *g2_3,
const char *g_zero,
const double *temperatures,
const long num_temps,
const double cutoff_frequency);
static double
collect_detailed_imag_self_energy(double *imag_self_energy,
const long num_band,
const double *fc3_normal_squared,
const double *n1,
const double *n2,
const double *g1,
const double *g2_3,
const char *g_zero);
static double
collect_detailed_imag_self_energy_0K(double *imag_self_energy,
const long num_band,
const double *fc3_normal_squared,
const double *n1,
const double *n2,
const double *g,
const char *g_zero);
static void set_occupations(double *n1,
double *n2,
const long num_band,
const double temperature,
const long triplet[3],
const double *frequencies,
const double cutoff_frequency);
void ise_get_imag_self_energy_at_bands_with_g(double *imag_self_energy,
const Darray *fc3_normal_squared,
const double *frequencies,
const long (*triplets)[3],
const long *triplet_weights,
const double *g,
const char *g_zero,
const double temperature,
const double cutoff_frequency,
const long num_frequency_points,
const long frequency_point_index)
{
long i, j, num_triplets, num_band0, num_band, num_band_prod;
long num_g_pos, g_index_dims, g_index_shift;
long (*g_pos)[4];
double *ise;
long at_a_frequency_point;
g_pos = NULL;
ise = NULL;
num_triplets = fc3_normal_squared->dims[0];
num_band0 = fc3_normal_squared->dims[1];
num_band = fc3_normal_squared->dims[2];
num_band_prod = num_band0 * num_band * num_band;
ise = (double*)malloc(sizeof(double) * num_triplets * num_band0);
if (frequency_point_index < 0) {
/* frequency_points == frequencies at bands */
at_a_frequency_point = 0;
g_index_dims = num_band_prod;
g_index_shift = 0;
} else {
/* At an arbitrary frequency point. */
at_a_frequency_point = 1;
g_index_dims = num_frequency_points * num_band * num_band;
g_index_shift = frequency_point_index * num_band * num_band;
}
#pragma omp parallel for private(num_g_pos, j, g_pos)
for (i = 0; i < num_triplets; i++) {
g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);
/* ise_set_g_pos only works for the case of frequency points at */
/* bands. For frequency sampling mode, g_zero is assumed to be */
/* given with the array shape (num_triplets, num_band0, num_band, */
/* num_band). */
if (at_a_frequency_point) {
num_g_pos = ise_set_g_pos_frequency_point(
g_pos,
num_band0,
num_band,
g_zero + i * g_index_dims + g_index_shift);
} else {
num_g_pos = ise_set_g_pos(g_pos,
num_band0,
num_band,
g_zero + i * num_band_prod);
}
ise_imag_self_energy_at_triplet(
ise + i * num_band0,
num_band0,
num_band,
fc3_normal_squared->data + i * num_band_prod,
frequencies,
triplets[i],
triplet_weights[i],
g + i * g_index_dims + g_index_shift,
g + (i + num_triplets) * g_index_dims + g_index_shift,
g_pos,
num_g_pos,
&temperature,
1,
cutoff_frequency,
0,
at_a_frequency_point);
free(g_pos);
g_pos = NULL;
}
for (i = 0; i < num_band0; i++) {
imag_self_energy[i] = 0;
}
for (i = 0; i < num_triplets; i++) {
for (j = 0; j < num_band0; j++) {
imag_self_energy[j] += ise[i * num_band0 + j];
}
}
free(ise);
ise = NULL;
}
void ise_get_detailed_imag_self_energy_at_bands_with_g
(double *detailed_imag_self_energy,
double *imag_self_energy_N,
double *imag_self_energy_U,
const Darray *fc3_normal_squared,
const double *frequencies,
const long (*triplets)[3],
const long *triplet_weights,
const long (*bz_grid_addresses)[3],
const double *g,
const char *g_zero,
const double temperature,
const double cutoff_frequency)
{
double *ise;
long i, j, num_triplets, num_band0, num_band, num_band_prod;
long *is_N;
double ise_tmp, N, U;
ise = NULL;
is_N = NULL;
num_triplets = fc3_normal_squared->dims[0];
num_band0 = fc3_normal_squared->dims[1];
num_band = fc3_normal_squared->dims[2];
num_band_prod = num_band0 * num_band * num_band;
ise = (double*)malloc(sizeof(double) * num_triplets * num_band0);
/* detailed_imag_self_energy has the same shape as fc3_normal_squared. */
#pragma omp parallel for
for (i = 0; i < num_triplets; i++) {
detailed_imag_self_energy_at_triplet
(detailed_imag_self_energy + i * num_band_prod,
ise + i * num_band0,
num_band0,
num_band,
fc3_normal_squared->data + i * num_band_prod,
frequencies,
triplets[i],
g + i * num_band_prod,
g + (i + num_triplets) * num_band_prod,
g_zero + i * num_band_prod,
&temperature,
1,
cutoff_frequency);
}
is_N = (long*)malloc(sizeof(long) * num_triplets);
for (i = 0; i < num_triplets; i++) {
is_N[i] = tpl_is_N(triplets[i], bz_grid_addresses);
}
for (i = 0; i < num_band0; i++) {
N = 0;
U = 0;
/* #pragma omp parallel for private(ise_tmp) reduction(+:N,U) */
for (j = 0; j < num_triplets; j++) {
ise_tmp = ise[j * num_band0 + i] * triplet_weights[j];
if (is_N[j]) {
N += ise_tmp;
} else {
U += ise_tmp;
}
}
imag_self_energy_N[i] = N;
imag_self_energy_U[i] = U;
}
free(is_N);
is_N = NULL;
free(ise);
ise = NULL;
}
void ise_imag_self_energy_at_triplet(double *imag_self_energy,
const long num_band0,
const long num_band,
const double *fc3_normal_squared,
const double *frequencies,
const long triplet[3],
const long triplet_weight,
const double *g1,
const double *g2_3,
const long (*g_pos)[4],
const long num_g_pos,
const double *temperatures,
const long num_temps,
const double cutoff_frequency,
const long openmp_at_bands,
const long at_a_frequency_point)
{
long i, j;
double *n1, *n2;
long g_pos_3;
n1 = (double*)malloc(sizeof(double) * num_temps * num_band);
n2 = (double*)malloc(sizeof(double) * num_temps * num_band);
for (i = 0; i < num_temps; i++) {
set_occupations(n1 + i * num_band,
n2 + i * num_band,
num_band,
temperatures[i],
triplet,
frequencies,
cutoff_frequency);
}
for (i = 0; i < num_band0 * num_temps; i++) {
imag_self_energy[i] = 0;
}
/* Do not use OpenMP here!! */
/* g_pos[i][0] only takes values in [0, num_band0), so distinct */
/* iterations can update the same imag_self_energy element, which */
/* would be a race condition if this loop were parallelized. */
for (i = 0; i < num_g_pos; i++) {
if (at_a_frequency_point) {
/* At an arbitrary frequency point */
g_pos_3 = g_pos[i][3] % (num_band * num_band);
} else {
/* frequency_points == frequencies at bands */
g_pos_3 = g_pos[i][3];
}
for (j = 0; j < num_temps; j++) {
if (n1[j * num_band + g_pos[i][1]] < 0 ||
n2[j * num_band + g_pos[i][2]] < 0) {
;
} else {
if (temperatures[j] > 0) {
imag_self_energy[j * num_band0 + g_pos[i][0]] +=
((n1[j * num_band + g_pos[i][1]] +
n2[j * num_band + g_pos[i][2]] + 1) * g1[g_pos_3] +
(n1[j * num_band + g_pos[i][1]] -
n2[j * num_band + g_pos[i][2]]) * g2_3[g_pos_3]) *
fc3_normal_squared[g_pos[i][3]] * triplet_weight;
} else {
imag_self_energy[j * num_band0 + g_pos[i][0]] +=
g1[g_pos_3] * fc3_normal_squared[g_pos[i][3]] * triplet_weight;
}
}
}
}
free(n1);
n1 = NULL;
free(n2);
n2 = NULL;
}
long ise_set_g_pos(long (*g_pos)[4],
const long num_band0,
const long num_band,
const char *g_zero)
{
long num_g_pos, j, k, l, jkl;
num_g_pos = 0;
jkl = 0;
for (j = 0; j < num_band0; j++) {
for (k = 0; k < num_band; k++) {
for (l = 0; l < num_band; l++) {
if (!g_zero[jkl]) {
g_pos[num_g_pos][0] = j;
g_pos[num_g_pos][1] = k;
g_pos[num_g_pos][2] = l;
g_pos[num_g_pos][3] = jkl;
num_g_pos++;
}
jkl++;
}
}
}
return num_g_pos;
}
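/* Illustrative sketch (an addition, not part of phonopy): for num_band0 = 1
and num_band = 2, the mask g_zero = {1, 0, 0, 1} keeps flattened positions
1 and 2, so ise_set_g_pos packs them as {0,0,1,1} and {0,1,0,2} and
returns 2. */
static void g_pos_demo(void)
{
long g_pos[4][4];
const char g_zero[4] = {1, 0, 0, 1};
long n = ise_set_g_pos(g_pos, 1, 2, g_zero);
printf("num_g_pos = %ld\n", n); /* expected: 2 */
}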
static long ise_set_g_pos_frequency_point(long (*g_pos)[4],
const long num_band0,
const long num_band,
const char *g_zero)
{
long num_g_pos, j, k, l, kl, jkl;
num_g_pos = 0;
jkl = 0;
for (j = 0; j < num_band0; j++) {
kl = 0;
for (k = 0; k < num_band; k++) {
for (l = 0; l < num_band; l++) {
if (!g_zero[kl]) {
g_pos[num_g_pos][0] = j;
g_pos[num_g_pos][1] = k;
g_pos[num_g_pos][2] = l;
g_pos[num_g_pos][3] = jkl;
num_g_pos++;
}
jkl++;
kl++;
}
}
}
return num_g_pos;
}
static void
detailed_imag_self_energy_at_triplet(double *detailed_imag_self_energy,
double *imag_self_energy,
const long num_band0,
const long num_band,
const double *fc3_normal_squared,
const double *frequencies,
const long triplet[3],
const double *g1,
const double *g2_3,
const char *g_zero,
const double *temperatures,
const long num_temps,
const double cutoff_frequency)
{
long i, j, adrs_shift;
double *n1, *n2;
n1 = NULL;
n2 = NULL;
n1 = (double*)malloc(sizeof(double) * num_band);
n2 = (double*)malloc(sizeof(double) * num_band);
for (i = 0; i < num_temps; i++) {
set_occupations(n1,
n2,
num_band,
temperatures[i],
triplet,
frequencies,
cutoff_frequency);
for (j = 0; j < num_band0; j++) {
adrs_shift = j * num_band * num_band;
if (temperatures[i] > 0) {
imag_self_energy[i * num_band0 + j] =
collect_detailed_imag_self_energy
(detailed_imag_self_energy + adrs_shift,
num_band,
fc3_normal_squared + adrs_shift,
n1,
n2,
g1 + adrs_shift,
g2_3 + adrs_shift,
g_zero + adrs_shift);
} else {
imag_self_energy[i * num_band0 + j] =
collect_detailed_imag_self_energy_0K
(detailed_imag_self_energy + adrs_shift,
num_band,
fc3_normal_squared + adrs_shift,
n1,
n2,
g1 + adrs_shift,
g_zero + adrs_shift);
}
}
}
free(n1);
n1 = NULL;
free(n2);
n2 = NULL;
}
static double
collect_detailed_imag_self_energy(double *imag_self_energy,
const long num_band,
const double *fc3_normal_squared,
const double *n1,
const double *n2,
const double *g1,
const double *g2_3,
const char *g_zero)
{
long ij, i, j;
double sum_g;
sum_g = 0;
for (ij = 0; ij < num_band * num_band; ij++) {
imag_self_energy[ij] = 0;
if (g_zero[ij]) {continue;}
i = ij / num_band;
j = ij % num_band;
if (n1[i] < 0 || n2[j] < 0) {continue;}
imag_self_energy[ij] = (((n1[i] + n2[j] + 1) * g1[ij] +
(n1[i] - n2[j]) * g2_3[ij]) *
fc3_normal_squared[ij]);
sum_g += imag_self_energy[ij];
}
return sum_g;
}
static double
collect_detailed_imag_self_energy_0K(double *imag_self_energy,
const long num_band,
const double *fc3_normal_squared,
const double *n1,
const double *n2,
const double *g1,
const char *g_zero)
{
long ij, i, j;
double sum_g;
sum_g = 0;
for (ij = 0; ij < num_band * num_band; ij++) {
imag_self_energy[ij] = 0;
if (g_zero[ij]) {continue;}
i = ij / num_band;
j = ij % num_band;
if (n1[i] < 0 || n2[j] < 0) {continue;}
imag_self_energy[ij] = g1[ij] * fc3_normal_squared[ij];
sum_g += imag_self_energy[ij];
}
return sum_g;
}
static void set_occupations(double *n1,
double *n2,
const long num_band,
const double temperature,
const long triplet[3],
const double *frequencies,
const double cutoff_frequency)
{
long j;
double f1, f2;
for (j = 0; j < num_band; j++) {
f1 = frequencies[triplet[1] * num_band + j];
f2 = frequencies[triplet[2] * num_band + j];
if (f1 > cutoff_frequency) {
n1[j] = phonoc_bose_einstein(f1, temperature);
} else {
n1[j] = -1;
}
if (f2 > cutoff_frequency) {
n2[j] = phonoc_bose_einstein(f2, temperature);
} else {
n2[j] = -1;
}
}
}
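/* Hedged sketch (an addition; the exact definition and units of
phonoc_bose_einstein live in phonoc_utils.c and are an assumption here):
the occupations set above are presumably the Bose-Einstein distribution
n(f, T) = 1 / (exp(h f / (kB T)) - 1), which for f in THz and T in K
reads: */
#include <math.h>
static double bose_einstein_sketch(const double f_THz, const double T)
{
const double h_over_kB = 47.992430; /* h/kB in K/THz, approximately */
return 1.0 / (exp(f_THz * h_over_kB / T) - 1.0);
}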
|
Example_doacross.4.c
|
/*
* @@name: doacross.4c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.5
*/
double foo(int i, int j);
void work( int N, int M, double **A, double **B, double **C )
{
int i, j;
double alpha = 1.2;
#pragma omp for collapse(2) ordered(2)
for (i = 1; i < N-1; i++)
{
for (j = 1; j < M-1; j++)
{
A[i][j] = foo(i, j);
#pragma omp ordered depend(source)
B[i][j] = alpha * A[i][j];
#pragma omp ordered depend(sink: i-1,j) depend(sink: i,j-1)
C[i][j] = 0.2 * (A[i-1][j] + A[i+1][j] +
A[i][j-1] + A[i][j+1] + A[i][j]);
}
}
}
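/* Minimal companion sketch (an addition, not from the OpenMP Examples
* document): a 1-D doacross recurrence. Each iteration waits on its
* predecessor's source point before reading a[i-1]; the out-of-range
* sink for i == 1 is ignored per the OpenMP spec. */
void prefix_sum_doacross(int n, double *a)
{
int i;
#pragma omp parallel for ordered(1)
for (i = 1; i < n; i++)
{
#pragma omp ordered depend(sink: i-1)
a[i] += a[i-1]; /* iteration i-1 has finished its update */
#pragma omp ordered depend(source)
}
}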
|
3d7pt_var.c
|
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h> /* needed for omp_get_max_threads() below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; // zero defaults: without arguments the loops below do no work
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 7
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
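/* Hedged sketch (an addition): one way to parallelize the spatial sweep above
* with OpenMP. The benchmark deliberately keeps the loop nest serial inside
* the scop so source-to-source tools can tile and parallelize it. */
void sweep_3d7pt_omp(int Nz, int Ny, int Nx, int t, double ****A, double ****coef)
{
int i, j, k;
#pragma omp parallel for private(j, k)
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}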
|
3d25pt_var.c
|
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h> /* needed for omp_get_max_threads() below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; // zero defaults: without arguments the loops below do no work
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 24 && Multiplication: 13
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
par_rap_communication.c
|
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
HYPRE_Int
hypre_GetCommPkgRTFromCommPkgA( hypre_ParCSRMatrix *RT,
hypre_ParCSRMatrix *A,
HYPRE_Int *fine_to_coarse,
HYPRE_Int *tmp_map_offd)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(RT);
hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
hypre_ParCSRCommPkg *comm_pkg;
HYPRE_Int num_recvs_RT;
HYPRE_Int *recv_procs_RT;
HYPRE_Int *recv_vec_starts_RT;
HYPRE_Int num_sends_RT;
HYPRE_Int *send_procs_RT;
HYPRE_Int *send_map_starts_RT;
HYPRE_Int *send_map_elmts_RT;
HYPRE_BigInt *col_map_offd_RT = hypre_ParCSRMatrixColMapOffd(RT);
HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols( hypre_ParCSRMatrixOffd(RT));
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(RT);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_BigInt *send_big_elmts = NULL;
HYPRE_BigInt my_first_cpt;
HYPRE_Int i, j;
HYPRE_Int vec_len, vec_start;
HYPRE_Int num_procs, my_id;
HYPRE_Int ierr = 0;
HYPRE_Int num_requests;
HYPRE_Int offd_col, proc_num;
HYPRE_Int num_threads = hypre_NumThreads();
HYPRE_Int size, rest, ns, ne, start;
HYPRE_Int index;
HYPRE_Int *proc_mark;
HYPRE_Int *change_array;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/*--------------------------------------------------------------------------
* determine num_recvs, recv_procs and recv_vec_starts for RT
*--------------------------------------------------------------------------*/
proc_mark = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST);
for (i=0; i < num_recvs_A; i++)
proc_mark[i] = 0;
proc_num = 0;
num_recvs_RT = 0;
if (num_cols_offd_RT)
{
for (i=0; i < num_recvs_A; i++)
{
for (j=recv_vec_starts_A[i]; j<recv_vec_starts_A[i+1]; j++)
{
offd_col = tmp_map_offd[proc_num];
if (offd_col == j)
{
proc_mark[i]++;
proc_num++;
if (proc_num == num_cols_offd_RT) break;
}
}
if (proc_mark[i]) num_recvs_RT++;
if (proc_num == num_cols_offd_RT) break;
}
}
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_starts_A[num_sends_A], HYPRE_MEMORY_HOST);
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = hypre_ParCSRMatrixColStarts(RT)[0];
#else
my_first_cpt = hypre_ParCSRMatrixColStarts(RT)[my_id];
#endif
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
index = 0;
for (i = 0; i < num_sends_A; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg_A, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg_A, i+1); j++)
big_buf_data[index++] = my_first_cpt+
(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg_A,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg_A, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
for (i=0; i < num_cols_offd_RT; i++)
col_map_offd_RT[i] = fine_to_coarse_offd[tmp_map_offd[i]];
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
//hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
recv_procs_RT = hypre_CTAlloc(HYPRE_Int, num_recvs_RT, HYPRE_MEMORY_HOST);
recv_vec_starts_RT = hypre_CTAlloc(HYPRE_Int, num_recvs_RT+1, HYPRE_MEMORY_HOST);
j = 0;
recv_vec_starts_RT[0] = 0;
for (i=0; i < num_recvs_A; i++)
{
if (proc_mark[i])
{
recv_procs_RT[j] = recv_procs_A[i];
recv_vec_starts_RT[j+1] = recv_vec_starts_RT[j]+proc_mark[i];
j++;
}
}
/*--------------------------------------------------------------------------
* send num_changes to recv_procs_A and receive change_array from send_procs_A
*--------------------------------------------------------------------------*/
num_requests = num_recvs_A+num_sends_A;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
change_array = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST);
j = 0;
for (i=0; i < num_sends_A; i++)
hypre_MPI_Irecv(&change_array[i],1,HYPRE_MPI_INT,send_procs_A[i],0,comm,
&requests[j++]);
for (i=0; i < num_recvs_A; i++)
hypre_MPI_Isend(&proc_mark[i],1,HYPRE_MPI_INT,recv_procs_A[i],0,comm,
&requests[j++]);
hypre_MPI_Waitall(num_requests,requests,status);
hypre_TFree(proc_mark, HYPRE_MEMORY_HOST);
/*--------------------------------------------------------------------------
* if change_array[i] is 0 , omit send_procs_A[i] in send_procs_RT
*--------------------------------------------------------------------------*/
num_sends_RT = 0;
for (i=0; i < num_sends_A; i++)
if (change_array[i])
{
num_sends_RT++;
}
send_procs_RT = hypre_CTAlloc(HYPRE_Int, num_sends_RT, HYPRE_MEMORY_HOST);
send_map_starts_RT = hypre_CTAlloc(HYPRE_Int, num_sends_RT+1, HYPRE_MEMORY_HOST);
j = 0;
send_map_starts_RT[0] = 0;
for (i=0; i < num_sends_A; i++)
{
if (change_array[i])
{
send_procs_RT[j] = send_procs_A[i];
send_map_starts_RT[j+1] = send_map_starts_RT[j]+change_array[i];
j++;
}
}
/*--------------------------------------------------------------------------
* generate send_map_elmts
*--------------------------------------------------------------------------*/
send_map_elmts_RT = hypre_CTAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);
send_big_elmts = hypre_CTAlloc(HYPRE_BigInt, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);
j = 0;
for (i=0; i < num_sends_RT; i++)
{
vec_start = send_map_starts_RT[i];
vec_len = send_map_starts_RT[i+1]-vec_start;
hypre_MPI_Irecv(&send_big_elmts[vec_start],vec_len,HYPRE_MPI_BIG_INT,
send_procs_RT[i],0,comm,&requests[j++]);
}
for (i=0; i < num_recvs_RT; i++)
{
vec_start = recv_vec_starts_RT[i];
vec_len = recv_vec_starts_RT[i+1] - vec_start;
hypre_MPI_Isend(&col_map_offd_RT[vec_start],vec_len,HYPRE_MPI_BIG_INT,
recv_procs_RT[i],0,comm,&requests[j++]);
}
hypre_MPI_Waitall(j,requests,status);
for (i=0; i < send_map_starts_RT[num_sends_RT]; i++)
send_map_elmts_RT[i] = (HYPRE_Int)(send_big_elmts[i]-first_col_diag);
comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(comm_pkg) = num_sends_RT;
hypre_ParCSRCommPkgNumRecvs(comm_pkg) = num_recvs_RT;
hypre_ParCSRCommPkgSendProcs(comm_pkg) = send_procs_RT;
hypre_ParCSRCommPkgRecvProcs(comm_pkg) = recv_procs_RT;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg) = recv_vec_starts_RT;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg) = send_map_starts_RT;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = send_map_elmts_RT;
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(send_big_elmts, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixCommPkg(RT) = comm_pkg;
hypre_TFree(change_array, HYPRE_MEMORY_HOST);
return ierr;
}
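/* Illustrative helper (an addition, not part of hypre): the ns/ne
* computation in the threaded loop of the routine above splits n items
* over num_threads blocks, giving each of the first `rest` threads one
* extra item. The same partition as a standalone function: */
static void block_range_sketch(HYPRE_Int n, HYPRE_Int num_threads,
HYPRE_Int tid, HYPRE_Int *ns, HYPRE_Int *ne)
{
HYPRE_Int size = n / num_threads;
HYPRE_Int rest = n - size * num_threads;
if (tid < rest)
{
*ns = tid * size + tid;
*ne = (tid + 1) * size + tid + 1;
}
else
{
*ns = tid * size + rest;
*ne = (tid + 1) * size + rest;
}
}
/* e.g. n = 10, num_threads = 4 -> [0,3) [3,6) [6,8) [8,10) */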
HYPRE_Int
hypre_GenerateSendMapAndCommPkg(MPI_Comm comm, HYPRE_Int num_sends, HYPRE_Int num_recvs,
HYPRE_Int *recv_procs, HYPRE_Int *send_procs,
HYPRE_Int *recv_vec_starts, hypre_ParCSRMatrix *A)
{
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts;
HYPRE_Int i, j;
HYPRE_Int num_requests = num_sends+num_recvs;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
HYPRE_Int vec_len, vec_start;
hypre_ParCSRCommPkg *comm_pkg;
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(A);
HYPRE_BigInt *send_big_elmts = NULL;
/*--------------------------------------------------------------------------
* generate send_map_starts and send_map_elmts
*--------------------------------------------------------------------------*/
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
j = 0;
for (i=0; i < num_sends; i++)
hypre_MPI_Irecv(&send_map_starts[i+1],1,HYPRE_MPI_INT,send_procs[i],0,comm,
&requests[j++]);
for (i=0; i < num_recvs; i++)
{
vec_len = recv_vec_starts[i+1] - recv_vec_starts[i];
hypre_MPI_Isend(&vec_len,1,HYPRE_MPI_INT, recv_procs[i],0,comm,&requests[j++]);
}
hypre_MPI_Waitall(j,requests,status);
send_map_starts[0] = 0;
for (i=0; i < num_sends; i++)
send_map_starts[i+1] += send_map_starts[i];
send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
send_big_elmts = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
j = 0;
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1]-vec_start;
hypre_MPI_Irecv(&send_big_elmts[vec_start],vec_len,HYPRE_MPI_BIG_INT,
send_procs[i],0,comm,&requests[j++]);
}
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
hypre_MPI_Isend(&col_map_offd[vec_start],vec_len,HYPRE_MPI_BIG_INT,
recv_procs[i],0,comm,&requests[j++]);
}
hypre_MPI_Waitall(j,requests,status);
for (i=0; i < send_map_starts[num_sends]; i++)
send_map_elmts[i] = (HYPRE_Int)(send_big_elmts[i]-first_col_diag);
comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(comm_pkg) = num_recvs;
hypre_ParCSRCommPkgSendProcs(comm_pkg) = send_procs;
hypre_ParCSRCommPkgRecvProcs(comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg) = recv_vec_starts;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg) = send_map_starts;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = send_map_elmts;
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(send_big_elmts, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixCommPkg(A) = comm_pkg;
return 0;
}
|
bml_adjungate_triangle_ellpack_typed.c
|
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_introspection.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_adjungate_triangle_ellpack.h"
#include "bml_types_ellpack.h"
#include <stdio.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Adjungates a triangle of a matrix in place.
*
* \ingroup adjungate_triangle_group
*
* \param A[in,out] The matrix for which the triangle should be adjungated
* \param triangle[in] Which triangle to adjungate ('u': upper, 'l': lower)
*
* WARNING: Please verify race conditions and parallel performances.
*/
void TYPED_FUNC(
bml_adjungate_triangle_ellpack) (
bml_matrix_ellpack_t * A,
char *triangle)
{
int A_N = A->N;
int A_M = A->M;
int l;
int ll;
int j;
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
#ifdef _OPENMP
omp_lock_t lock[A_M];
#endif
switch (*triangle)
{
case 'u':
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_init_lock(&(lock[i]));
#endif
#if defined(USE_OMP_OFFLOAD)
#pragma omp target update from(A_value[:A_N*A_M], A_index[:A_N*A_M], A_nnz[:A_N])
#endif
#pragma omp parallel for \
shared(A_N, A_M, A_index, A_nnz, A_value, lock) \
private(l, ll)
// WARNING: Please, check for race conditions ...
for (int i = 0; i < A_N; i++) // For every row
{
l = A_nnz[i];
for (int j = 0; j < l; j++) // We search for indices gt 0.
{
ll = A_index[ROWMAJOR(i, j, A_N, A_M)];
if (ll > 0)
{
if (ll > i)
{
#ifdef _OPENMP
omp_set_lock(&(lock[ll]));
#endif
A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i;
A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] =
conj(A_value[ROWMAJOR(i, j, A_N, A_M)]);
A_nnz[ll]++;
#ifdef _OPENMP
omp_unset_lock(&(lock[ll]));
#endif
}
}
}
}
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_destroy_lock(&(lock[i]));
#endif
break;
case 'l':
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_init_lock(&(lock[i]));
#endif
#pragma omp parallel for \
shared(lock, A_N, A_M, A_index, A_nnz, A_value) \
private(l, ll)
// WARNING: Please, check for race conditions and parallel performances ...
for (int i = 0; i < A_N; i++)
{
l = A_nnz[i];
for (int j = 0; j < l; j++)
{
ll = A_index[ROWMAJOR(i, j, A_N, A_M)];
if (ll >= 0)
{
if (ll < i)
{
#ifdef _OPENMP
omp_set_lock(&(lock[ll]));
#endif
A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i;
A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] =
conj(A_value[ROWMAJOR(i, j, A_N, A_M)]);
A_nnz[ll]++;
#ifdef _OPENMP
omp_unset_lock(&(lock[ll]));
#endif
}
}
}
}
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_destroy_lock(&(lock[i]));
#endif
break;
default:
LOG_ERROR("unknown triangle %c\n", *triangle);
break;
}
#if defined(USE_OMP_OFFLOAD)
#pragma omp target update to(A_value[:A_N*A_M], A_index[:A_N*A_M], A_nnz[:A_N])
#endif
}
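/* Hedged sketch (an addition, not part of bml): the essence of the
* lock-per-row scatter used above. Threads append into arbitrary
* destination rows, so each row's length counter and storage are
* guarded by that row's own lock. */
#ifdef _OPENMP
static void scatter_append_sketch(int n_rows, int row_cap, int *row_len,
int *row_data, const int *src_row, const int *src_val, int n_src)
{
int s, r;
omp_lock_t *locks = (omp_lock_t *) malloc(sizeof(omp_lock_t) * n_rows);
for (r = 0; r < n_rows; r++)
omp_init_lock(&locks[r]);
#pragma omp parallel for private(r)
for (s = 0; s < n_src; s++)
{
r = src_row[s];
omp_set_lock(&locks[r]);
row_data[r * row_cap + row_len[r]] = src_val[s];
row_len[r]++; /* protected by row r's lock */
omp_unset_lock(&locks[r]);
}
for (r = 0; r < n_rows; r++)
omp_destroy_lock(&locks[r]);
free(locks);
}
#endif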
|
image.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) ResetMagickMemory(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
(void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
exception);
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image->transparent_color,exception);
GetTimerInfo(&image->timer);
image->cache=AcquirePixelCache(0);
image->channel_mask=DefaultChannels;
image->channel_map=AcquirePixelChannelMap();
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=time((time_t *) NULL);
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AcquireSemaphoreInfo();
image->signature=MagickCoreSignature;
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
if (image_info->size != (char *) NULL)
{
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->matte_color=image_info->matte_color;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
/*
Set all global options that map to per-image settings.
*/
(void) SyncImageSettings(image_info,image,exception);
/*
Global options that are only set for new images.
*/
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MagickPathExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MagickPathExtent);
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
alpha_trait=images->alpha_trait;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
{
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace,exception);
append_image->depth=depth;
append_image->alpha_trait=alpha_trait;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(next,&pixel);
for (x=0; x < (ssize_t) next->columns; x++)
{
if (GetPixelWriteMask(next,p) <= (QuantumRange/2))
{
SetPixelBackgoundColor(append_image,q);
p+=GetPixelChannels(next);
q+=GetPixelChannels(append_image);
continue;
}
GetPixelInfoPixel(next,p,&pixel);
SetPixelViaPixelInfo(append_image,&pixel,q);
p+=GetPixelChannels(next);
q+=GetPixelChannels(append_image);
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
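/*
Hedged usage sketch (an addition, illustrative only; the filename is
hypothetical): read an image sequence and lay it out left-to-right.

ExceptionInfo *exception = AcquireExceptionInfo();
ImageInfo *image_info = AcquireImageInfo();
(void) CopyMagickString(image_info->filename,"frames-%d.png",
MagickPathExtent);
Image *images = ReadImage(image_info,exception);
Image *strip = AppendImages(images,MagickFalse,exception);
*/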
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
property=AcquireString(pathname);
(void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property,exception);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,
MagickPathExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask,exception);
if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (inside != MagickFalse)
(void) NegateImage(clip_mask,MagickFalse,exception);
(void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageMask(image,WritePixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
%      const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
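%  A minimal sketch that makes a detached, exact copy and later releases it:
%
%      Image
%        *clone_image;
%
%      clone_image=CloneImage(image,0,0,MagickTrue,exception);
%      if (clone_image != (Image *) NULL)
%        clone_image=DestroyImage(clone_image);
%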
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
Image
*clone_image;
double
scale;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == 0) || (image->rows == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"NegativeOrZeroImageSize","`%s'",image->filename);
return((Image *) NULL);
}
clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
if (clone_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickCoreSignature;
clone_image->storage_class=image->storage_class;
clone_image->number_channels=image->number_channels;
clone_image->number_meta_channels=image->number_meta_channels;
clone_image->metacontent_extent=image->metacontent_extent;
clone_image->colorspace=image->colorspace;
clone_image->read_mask=image->read_mask;
clone_image->write_mask=image->write_mask;
clone_image->alpha_trait=image->alpha_trait;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
clone_image->image_info=CloneImageInfo(image->image_info);
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
clone_image->channel_mask=image->channel_mask;
clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MagickPathExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
(void) CopyMagickString(clone_image->filename,image->filename,
MagickPathExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AcquireSemaphoreInfo();
if (image->colormap != (PixelInfo *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelInfo *) NULL)
{
clone_image=DestroyImage(clone_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
if ((columns == 0) || (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
scale=1.0;
if (image->columns != 0)
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
scale=1.0;
if (image->rows != 0)
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
clone_image->cache=ClonePixelCache(image->cache);
if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
clone_image=DestroyImage(clone_image);
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created and initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
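%  A minimal sketch; the copy owns its own strings and must be destroyed
%  separately:
%
%      ImageInfo
%        *clone_info;
%
%      clone_info=CloneImageInfo(image_info);
%      ...
%      clone_info=DestroyImageInfo(clone_info);
%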
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
if (image_info->size != (char *) NULL)
(void) CloneString(&clone_info->size,image_info->size);
if (image_info->extract != (char *) NULL)
(void) CloneString(&clone_info->extract,image_info->extract);
if (image_info->scenes != (char *) NULL)
(void) CloneString(&clone_info->scenes,image_info->scenes);
if (image_info->page != (char *) NULL)
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
if (image_info->sampling_factor != (char *) NULL)
(void) CloneString(&clone_info->sampling_factor,
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,image_info->server_name);
if (image_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,image_info->font);
if (image_info->texture != (char *) NULL)
(void) CloneString(&clone_info->texture,image_info->texture);
if (image_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->matte_color=image_info->matte_color;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->custom_stream=image_info->custom_stream;
(void) CopyMagickString(clone_info->magick,image_info->magick,
MagickPathExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,
MagickPathExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MagickPathExtent);
clone_info->channel=image_info->channel;
(void) CloneImageOptions(clone_info,image_info);
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
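%  A minimal sketch that copies a 64x64 tile from the source origin to
%  offset (10,20) in the destination (the dimensions are illustrative; the
%  RectangleInfo fields are width, height, x, y):
%
%      RectangleInfo geometry = { 64, 64, 0, 0 };
%      OffsetInfo offset = { 10, 20 };
%
%      (void) CopyImagePixels(image,source_image,&geometry,&offset,exception);
%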
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,source_image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) geometry->width; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CopyImage)
#endif
proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
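%  Because the image is freed only when its reference count drops to zero,
%  the conventional idiom overwrites the pointer with the return value,
%  which is NULL once the image is actually destroyed:
%
%      image=DestroyImage(image);
%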
*/
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
image->channel_map=DestroyPixelChannelMap(image->channel_map);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelInfo *) NULL)
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info *) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
DestroyBlob(image);
if (image->semaphore != (SemaphoreInfo *) NULL)
RelinquishSemaphoreInfo(&image->semaphore);
image->signature=(~MagickCoreSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
DestroyImageOptions(image_info);
image_info->signature=(~MagickCoreSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
char
*synchronize;
ExceptionInfo
*exception;
/*
File and image dimension members.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info != (ImageInfo *) NULL);
(void) ResetMagickMemory(image_info,0,sizeof(*image_info));
image_info->adjoin=MagickTrue;
image_info->interlace=NoInterlace;
image_info->channel=DefaultChannels;
image_info->quality=UndefinedCompressionQuality;
image_info->antialias=MagickTrue;
image_info->dither=MagickTrue;
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
image_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
exception=AcquireExceptionInfo();
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image_info->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,
&image_info->border_color,exception);
(void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image_info->transparent_color,exception);
exception=DestroyExceptionInfo(exception);
image_info->debug=IsEventLogging();
image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o exception: return any errors or warnings in this structure.
%
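%  A minimal sketch that extracts the read mask as a grayscale image; the
%  caller owns and must destroy the returned image:
%
%      Image
%        *mask_image;
%
%      mask_image=GetImageMask(image,ReadPixelMask,exception);
%      if (mask_image != (Image *) NULL)
%        mask_image=DestroyImage(mask_image);
%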
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
Image
*mask_image;
MagickBooleanType
status;
ssize_t
y;
/*
Get image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (mask_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
mask_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(mask_image,GRAYColorspace,exception);
mask_image->read_mask=MagickFalse;
image_view=AcquireVirtualCacheView(image,exception);
mask_view=AcquireAuthenticCacheView(mask_image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (type)
{
case WritePixelMask:
{
SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
break;
}
default:
{
SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
break;
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(mask_image);
}
if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
mask_image=DestroyImage(mask_image);
return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
ssize_t
reference_count;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
LockSemaphoreInfo(image->semaphore);
reference_count=image->reference_count;
UnlockSemaphoreInfo(image->semaphore);
return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
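%  A minimal sketch ("frame%03d.png" is a hypothetical format string; with
%  value 7 it expands to "frame007.png"):
%
%      char
%        filename[MagickPathExtent];
%
%      (void) InterpretImageFilename(image_info,image,"frame%03d.png",7,
%        filename,exception);
%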
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
Image *image,const char *format,int value,char *filename,
ExceptionInfo *exception)
{
char
*q;
int
c;
MagickBooleanType
canonical;
register const char
*p;
size_t
length;
canonical=MagickFalse;
length=0;
(void) CopyMagickString(filename,format,MagickPathExtent);
for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
{
q=(char *) p+1;
if (*q == '%')
{
p=q+1;
continue;
}
if (*q == '0')
{
ssize_t
foo;
foo=(ssize_t) strtol(q,&q,10);
(void) foo;
}
switch (*q)
{
case 'd':
case 'o':
case 'x':
{
q++;
c=(*q);
*q='\0';
(void) FormatLocaleString(filename+(p-format),(size_t)
(MagickPathExtent-(p-format)),p,value);
*q=c;
(void) ConcatenateMagickString(filename,q,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
case '[':
{
char
pattern[MagickPathExtent];
const char
*option;
register char
*r;
register ssize_t
i;
ssize_t
depth;
/*
Image option.
*/
/* FUTURE: Compare update with code from InterpretImageProperties()
Note that a 'filename:' property should not need depth recursion.
*/
if (strchr(p,']') == (char *) NULL)
break;
depth=1;
r=q+1;
for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
{
if (*r == '[')
depth++;
if (*r == ']')
depth--;
if (depth <= 0)
break;
pattern[i]=(*r++);
}
pattern[i]='\0';
if (LocaleNCompare(pattern,"filename:",9) != 0)
break;
option=(const char *) NULL;
if (image != (Image *) NULL)
option=GetImageProperty(image,pattern,exception);
if ((option == (const char *) NULL) && (image != (Image *) NULL))
option=GetImageArtifact(image,pattern);
if ((option == (const char *) NULL) &&
(image_info != (ImageInfo *) NULL))
option=GetImageOption(image_info,pattern);
if (option == (const char *) NULL)
break;
q--;
c=(*q);
*q='\0';
(void) CopyMagickString(filename+(p-format-length),option,(size_t)
(MagickPathExtent-(p-format-length)));
length+=strlen(pattern)-1;
*q=c;
(void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
default:
break;
}
}
for (q=filename; *q != '\0'; q++)
if ((*q == '%') && (*(q+1) == '%'))
{
(void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
canonical=MagickTrue;
}
if (canonical == MagickFalse)
(void) CopyMagickString(filename,format,MagickPathExtent);
return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. 0..65535 for
% Q16).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
(void) image;
(void) exception;
return(MagickFalse);
#else
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelTrait
traits;
traits=GetPixelChannelTraits(image,(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
pixel=(double) p[i];
if ((pixel < 0.0) || (pixel > QuantumRange) ||
(pixel != (double) ((QuantumAny) pixel)))
break;
}
p+=GetPixelChannels(image);
if (i < (ssize_t) GetPixelChannels(image))
status=MagickFalse;
}
if (x < (ssize_t) image->columns)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
if (p->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
char
magick[MagickPathExtent],
filename[MagickPathExtent];
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
(void) CopyMagickString(magick,image->magick,MagickPathExtent);
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (p->taint != MagickFalse)
return(MagickTrue);
if (LocaleCompare(p->magick,magick) != 0)
return(MagickTrue);
if (LocaleCompare(p->filename,filename) != 0)
return(MagickTrue);
}
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
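%  A minimal copy-on-write sketch: call ModifyImage() before altering pixels
%  so that shared references are not disturbed; afterwards the pointer
%  refers to a uniquely referenced image:
%
%      if (ModifyImage(&image,exception) == MagickFalse)
%        return(MagickFalse);
%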
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
ExceptionInfo *exception)
{
Image
*clone_image;
assert(image != (Image **) NULL);
assert(*image != (Image *) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (GetImageReferenceCount(*image) <= 1)
return(MagickTrue);
clone_image=CloneImage(*image,0,0,MagickTrue,exception);
LockSemaphoreInfo((*image)->semaphore);
(*image)->reference_count--;
UnlockSemaphoreInfo((*image)->semaphore);
*image=clone_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
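%  A minimal sketch that creates a 640x480 white canvas (the dimensions and
%  color are illustrative):
%
%      PixelInfo
%        background;
%
%      (void) QueryColorCompliance("white",AllCompliance,&background,
%        exception);
%      image=NewMagickImage(image_info,640,480,&background,exception);
%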
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
const size_t width,const size_t height,const PixelInfo *background,
ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(image_info != (const ImageInfo *) NULL);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info->signature == MagickCoreSignature);
assert(background != (const PixelInfo *) NULL);
image=AcquireImage(image_info,exception);
image->columns=width;
image->rows=height;
image->colorspace=background->colorspace;
image->alpha_trait=background->alpha_trait;
image->fuzz=background->fuzz;
image->depth=background->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% and returns a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
LockSemaphoreInfo(image->semaphore);
image->reference_count++;
UnlockSemaphoreInfo(image->semaphore);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
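%  For example, to clear the virtual canvas geometry and offset (the
%  equivalent of the +repage option):
%
%      (void) ResetImagePage(image,"0x0+0+0");
%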
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseAbsoluteGeometry(page,&geometry);
if ((flags & WidthValue) != 0)
{
if ((flags & HeightValue) == 0)
geometry.height=geometry.width;
image->page.width=geometry.width;
image->page.height=geometry.height;
}
if ((flags & AspectValue) != 0)
{
if ((flags & XValue) != 0)
image->page.x+=geometry.x;
if ((flags & YValue) != 0)
image->page.y+=geometry.y;
}
else
{
if ((flags & XValue) != 0)
{
image->page.x=geometry.x;
if ((image->page.width == 0) && (geometry.x > 0))
image->page.width=image->columns+geometry.x;
}
if ((flags & YValue) != 0)
{
image->page.y=geometry.y;
if ((image->page.height == 0) && (geometry.y > 0))
image->page.height=image->rows+geometry.y;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o alpha: the level of transparency: 0 is fully transparent and
%      QuantumRange is fully opaque.
%
%    o exception: return any errors or warnings in this structure.
%
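%  A minimal sketch that makes the entire image fully opaque:
%
%      (void) SetImageAlpha(image,OpaqueAlpha,exception);
%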
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,q) > (QuantumRange/2))
SetPixelAlpha(image,alpha,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
background;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if ((image->background_color.alpha != OpaqueAlpha) &&
(image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
ConformPixelInfo(image,&image->background_color,&background,exception);
/*
Set image background color.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
const ChannelType channel_mask)
{
return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() sets the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%      ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o color: the image color.
%
% o exception: return any errors or warnings in this structure.
%
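%  A minimal sketch that floods the canvas with red (the color name is
%  illustrative):
%
%      PixelInfo
%        color;
%
%      (void) QueryColorCompliance("red",AllCompliance,&color,exception);
%      (void) SetImageColor(image,&color,exception);
%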
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
const PixelInfo *color,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
assert(color != (const PixelInfo *) NULL);
image->colorspace=color->colorspace;
image->alpha_trait=color->alpha_trait;
image->fuzz=color->fuzz;
image->depth=color->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,color,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
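%  A minimal sketch (the dimensions are illustrative; the pixel data is left
%  uninitialized and the call fails for a zero extent):
%
%      if (SetImageExtent(image,640,480,exception) == MagickFalse)
%        return(MagickFalse);
%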
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
if ((columns == 0) || (rows == 0))
ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
image->columns=columns;
image->rows=rows;
if (image->depth > (8*sizeof(MagickSizeType)))
ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
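%  A minimal sketch that deduces the format before a read ("image.jpg" is a
%  hypothetical filename; frames == 0 probes the file's magic bytes when the
%  extension is ambiguous):
%
%      (void) CopyMagickString(image_info->filename,"image.jpg",
%        MagickPathExtent);
%      if (SetImageInfo(image_info,0,exception) != MagickFalse)
%        (void) FormatLocaleFile(stdout,"format: %s\n",image_info->magick);
%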
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
component[MagickPathExtent],
magic[MagickPathExtent],
*q;
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*component='\0';
GetPathComponent(image_info->filename,SubimagePath,component);
if (*component != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
{
if (IsGeometry(component) != MagickFalse)
(void) CloneString(&image_info->extract,component);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,component);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
}
}
*component='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (*component != '\0')
if ((LocaleCompare(component,"gz") == 0) ||
(LocaleCompare(component,"Z") == 0) ||
(LocaleCompare(component,"svgz") == 0) ||
(LocaleCompare(component,"wmz") == 0))
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
if (*component != '\0')
if (LocaleCompare(component,"bz2") == 0)
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if (*component != '\0')
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,component,MagickPathExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
{
(void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
magick_info=GetMagickInfo(magic,sans_exception);
GetPathComponent(image_info->filename,CanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,MagickPathExtent);
}
else
{
const DelegateInfo
*delegate_info;
/*
User specified image format.
*/
LocaleUpper(magic);
magick_info=GetMagickInfo(magic,sans_exception);
delegate_info=GetDelegateInfo(magic,"*",sans_exception);
if (delegate_info == (const DelegateInfo *) NULL)
delegate_info=GetDelegateInfo("*",magic,sans_exception);
if (((magick_info != (const MagickInfo *) NULL) ||
(delegate_info != (const DelegateInfo *) NULL)) &&
(IsMagickConflict(magic) == MagickFalse))
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
GetPathComponent(image_info->filename,CanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
}
}
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,component,exception);
if ((LocaleCompare(component,image_info->filename) != 0) &&
(strchr(component,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
unsigned char
*magick;
size_t
magick_size;
/*
Determine the image format from the first few bytes of the file.
*/
magick_size=GetMagicPatternExtent(exception);
if (magick_size == 0)
return(MagickFalse);
image=AcquireImage(image_info,exception);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy standard input or pipe to temporary file.
*/
*component='\0';
status=ImageToFile(image,component,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,component,MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
image_info->temporary=MagickTrue;
}
magick=(unsigned char *) AcquireMagickMemory(magick_size);
if (magick == (unsigned char *) NULL)
{
(void) CloseBlob(image);
image=DestroyImage(image);
return(MagickFalse);
}
(void) ResetMagickMemory(magick,0,magick_size);
count=ReadBlob(image,magick_size,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic.xml configuration file.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
magick=(unsigned char *) RelinquishMagickMemory(magick);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
/*
Try to use magick_info that was determined earlier by the extension
*/
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickUseExtension(magick_info) != MagickFalse) &&
(LocaleCompare(magick_info->module,GetMagicName(
magic_info)) == 0))
(void) CopyMagickString(image_info->magick,magick_info->name,
MagickPathExtent);
else
{
(void) CopyMagickString(image_info->magick,GetMagicName(
magic_info),MagickPathExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
const size_t length)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->blob=(void *) blob;
image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
CustomStreamInfo *custom_stream)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
const Image *mask,ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (mask == (const Image *) NULL)
{
switch (type)
{
case WritePixelMask: image->write_mask=MagickFalse; break;
default: image->read_mask=MagickFalse; break;
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case WritePixelMask: image->write_mask=MagickTrue; break;
default: image->read_mask=MagickTrue; break;
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
mask_view=AcquireVirtualCacheView(mask,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(mask,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity;
intensity=0;
if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
intensity=GetPixelIntensity(mask,p);
switch (type)
{
case WritePixelMask:
{
SetPixelWriteMask(image,ClampToQuantum(intensity),q);
break;
}
default:
{
SetPixelReadMask(image,ClampToQuantum(intensity),q);
break;
}
}
p+=GetPixelChannels(mask);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o region: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask as defined by the region.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (region == (const RectangleInfo *) NULL)
{
switch (type)
{
case WritePixelMask: image->write_mask=MagickFalse; break;
default: image->read_mask=MagickFalse; break;
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case WritePixelMask: image->write_mask=MagickTrue; break;
default: image->read_mask=MagickTrue; break;
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
pixel=0;
if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
pixel=QuantumRange;
switch (type)
{
case WritePixelMask:
{
SetPixelWriteMask(image,pixel,q);
break;
}
default:
{
SetPixelReadMask(image,pixel,q);
break;
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
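/*
  A minimal usage sketch (not part of the original source; `image' and
  `exception' are assumed to have been acquired elsewhere): associate a write
  mask covering a 100x100 box anchored at (10,10) -- mask polarity follows
  SetPixelWriteMask() above -- then remove the mask by passing a NULL region.

    RectangleInfo
      region;

    region.x=10;
    region.y=10;
    region.width=100;
    region.height=100;
    if (SetImageRegionMask(image,WritePixelMask,&region,exception) == MagickFalse)
      return(MagickFalse);
    ...pixel operations honoring the write mask...
    (void) SetImageRegionMask(image,WritePixelMask,(const RectangleInfo *) NULL,
      exception);
*/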
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const Quantum
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(left_image,p) != TransparentAlpha) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(right_image,p) != TransparentAlpha) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(top_image,p) != TransparentAlpha) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
const Image
*image;
Image
*smush_image;
MagickBooleanType
proceed,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute maximum area of smushed area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
alpha_trait=image->alpha_trait;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
{
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->alpha_trait=alpha_trait;
(void) SetImageBackgroundColor(smush_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
y_offset,exception);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
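/*
  A minimal usage sketch (not part of the original source; `image_info' with
  a multi-image filename and `exception' are assumed): smush a sequence
  left-to-right with a minimum 5 pixel gap and write the result.

    images=ReadImage(image_info,exception);
    if (images == (Image *) NULL)
      return(MagickFalse);
    smush_image=SmushImages(images,MagickFalse,5,exception);
    images=DestroyImageList(images);
    if (smush_image != (Image *) NULL)
      {
        (void) CopyMagickString(smush_image->filename,"smush.png",
          MagickPathExtent);
        (void) WriteImage(image_info,smush_image,exception);
        smush_image=DestroyImage(smush_image);
      }
*/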
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
(void) exception;
DestroyImageProfiles(image);
(void) DeleteImageProperty(image,"comment");
(void) DeleteImageProperty(image,"date:create");
(void) DeleteImageProperty(image,"date:modify");
status=SetImageArtifact(image,"png:exclude-chunk",
"bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
MagickBooleanType *range_exception)
{
if ((size_t) index < image->colors)
return(index);
*range_exception=MagickTrue;
return((Quantum) 0);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelInfo *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6, free-form 'options' were always copied into per-image
%   'artifacts' so that operations and coders could find such settings.  In
%   IMv7, an artifact lookup that finds no per-image artifact falls back
%   directly to the global option, so that copy is no longer needed; only the
%   image_info link needs to be set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
%        Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images,ExceptionInfo *exception)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image,exception);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->background_color,
exception);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->border_color,
exception);
/* FUTURE: do not sync compose to per-image compose setting here */
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
/* -- */
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->resolution.x=(double) ((size_t) (100.0*2.54*
image->resolution.x+0.5))/100.0;
image->resolution.y=(double) ((size_t) (100.0*2.54*
image->resolution.y+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"virtual-pixel");
if (option != (const char *) NULL)
(void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
exception);
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
  /*
    Keep a pointer to the image_info so that a per-image artifact lookup can
    fall back to a global option setting/define.  This avoids duplicating
    every global option into per-image artifacts, while ensuring that only
    explicitly set per-image artifacts are preserved when a parenthesis
    (image list scope) ends.
  */
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
image->image_info=CloneImageInfo(image_info);
return(MagickTrue);
}
|
mandelbrot.c
|
/*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include "omp.h"
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
} // end testpoint()
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
int n,m;
complex_t c;
double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);
// Q2c: add a compiler directive to split the outer for loop amongst threads here
#pragma omp parallel for private(m,c)
for(n=0;n<Nim;++n){
for(m=0;m<Nre;++m){
c.r = cmin.r + dr*m;
c.i = cmin.i + di*n;
count[m+n*Nre] = testpoint(c);
} // end inner for
} // end outer for
} // end mandelbrot()
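// Scheduling note: points inside the set burn all MXITER iterations while
// far-away points escape almost immediately, so the default (static)
// schedule above can leave threads idle near the end. A variant worth timing:
//   #pragma omp parallel for private(m,c) schedule(dynamic)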
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
  if(argc < 4){
    fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[argc-1]);
// Q2b: set the number of OpenMP threads to be Nthreads here:
omp_set_num_threads(Nthreads);
// storage for the iteration counts
float *count = (float*) malloc(Nre*Nim*sizeof(float));
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
// Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
double start = omp_get_wtime();
// compute mandelbrot set
mandelbrot(Nre, Nim, cmin, cmax, count);
// Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
double end = omp_get_wtime();
// print elapsed time
printf("elapsed = %g\n", end-start);
// output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "wb"); // PNG is binary; use "wb"
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  fclose(fp);
  free(count);
  return 0;
}
|
macro-2.c
|
// { dg-do compile }
#define p parallel
#define s(x) shared(x##1, x##2)
#define d(x) default(x)
void bar(int, int, int, int);
void foo(void)
{
int a1, a2, b1, b2;
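  /* after macro expansion the directive below reads:
     #pragma omp parallel shared(a1, a2) shared(b1, b2) default(none) */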
#pragma omp p s(a) s(b) d(none)
bar(a1, a2, b1, b2);
}
|
yolov2.h
|
#ifndef YOLOV2_H
#define YOLOV2_H
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <limits.h> // INT_MAX is used by fgetl() below
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "cnn.h"
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38F
#endif
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
typedef enum {
CONVOLUTIONAL,
DECONVOLUTIONAL,
CONNECTED,
MAXPOOL,
SOFTMAX,
DETECTION,
DROPOUT,
CROP,
ROUTE,
COST,
NORMALIZATION,
AVGPOOL,
LOCAL,
SHORTCUT,
ACTIVE,
RNN,
GRU,
LSTM,
CRNN,
BATCHNORM,
NETWORK,
XNOR,
REGION,
YOLO,
REORG,
UPSAMPLE,
LOGXENT,
L2NORM,
BLANK
} LAYER_TYPE;
struct network;
typedef struct network network;
struct layer;
typedef struct layer layer;
struct layer{
LAYER_TYPE type;
ACTIVATION activation;
void (*forward) (struct layer, struct network);
int batch_normalize;
int shortcut;
int batch;
int forced;
int flipped;
int inputs;
int outputs;
int nweights;
int nbiases;
int extra;
int truths;
int h,w,c;
int out_h, out_w, out_c;
int n;
int max_boxes;
int groups;
int size;
int side;
int stride;
int reverse;
int flatten;
int spatial;
int pad;
int sqrt;
int flip;
int index;
int binary;
int xnor;
int steps;
int hidden;
int truth;
float smooth;
float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
float ratio;
float learning_rate_scale;
float clip;
int softmax;
int classes;
int coords;
int background;
int rescore;
int objectness;
int joint;
int noadjust;
int reorg;
int log;
int tanh;
int *mask;
int total;
float alpha;
float beta;
float kappa;
float coord_scale;
float object_scale;
float noobject_scale;
float mask_scale;
float class_scale;
int bias_match;
int random;
float ignore_thresh;
float truth_thresh;
float thresh;
float focus;
int classfix;
int absolute;
int onlyforward;
int stopbackward;
// int dontload;
int dontsave;
// int dontloadscales;
float temperature;
float probability;
float scale;
char * cweights;
int * indexes;
int * input_layers;
int * input_sizes;
int * map;
float * rand;
float * cost;
float * state;
float * prev_state;
float * forgot_state;
float * forgot_delta;
float * state_delta;
float * combine_cpu;
float * combine_delta_cpu;
float * concat;
float * concat_delta;
float * binary_weights;
float * biases;
float * bias_updates;
float * scales;
float * scale_updates;
float * weights;
float * weight_updates;
float * delta;
float * output;
float * loss;
float * squared;
float * norms;
float * spatial_mean;
float * mean;
float * variance;
float * mean_delta;
float * variance_delta;
float * rolling_mean;
float * rolling_variance;
float * x;
float * x_norm;
float * m;
float * v;
float * bias_m;
float * bias_v;
float * scale_m;
float * scale_v;
float *z_cpu;
float *r_cpu;
float *h_cpu;
float * prev_state_cpu;
float *temp_cpu;
float *temp2_cpu;
float *temp3_cpu;
float *dh_cpu;
float *hh_cpu;
float *prev_cell_cpu;
float *cell_cpu;
float *f_cpu;
float *i_cpu;
float *g_cpu;
float *o_cpu;
float *c_cpu;
float *dc_cpu;
float * binary_input;
struct layer *input_layer;
struct layer *self_layer;
struct layer *output_layer;
struct layer *reset_layer;
struct layer *update_layer;
struct layer *state_layer;
struct layer *input_gate_layer;
struct layer *state_gate_layer;
struct layer *input_save_layer;
struct layer *state_save_layer;
struct layer *input_state_layer;
struct layer *state_state_layer;
struct layer *input_z_layer;
struct layer *state_z_layer;
struct layer *input_r_layer;
struct layer *state_r_layer;
struct layer *input_h_layer;
struct layer *state_h_layer;
struct layer *wz;
struct layer *uz;
struct layer *wr;
struct layer *ur;
struct layer *wh;
struct layer *uh;
struct layer *uo;
struct layer *wo;
struct layer *uf;
struct layer *wf;
struct layer *ui;
struct layer *wi;
struct layer *ug;
struct layer *wg;
//tree *softmax_tree;
size_t workspace_size;
};
void free_layer(layer l)
{
if(l.cweights) free(l.cweights);
if(l.indexes) free(l.indexes);
if(l.input_layers) free(l.input_layers);
if(l.input_sizes) free(l.input_sizes);
if(l.map) free(l.map);
if(l.rand) free(l.rand);
if(l.cost) free(l.cost);
if(l.state) free(l.state);
if(l.prev_state) free(l.prev_state);
if(l.forgot_state) free(l.forgot_state);
if(l.forgot_delta) free(l.forgot_delta);
if(l.state_delta) free(l.state_delta);
if(l.concat) free(l.concat);
if(l.concat_delta) free(l.concat_delta);
if(l.binary_weights) free(l.binary_weights);
if(l.biases) free(l.biases);
if(l.bias_updates) free(l.bias_updates);
if(l.scales) free(l.scales);
if(l.scale_updates) free(l.scale_updates);
if(l.weights) free(l.weights);
if(l.weight_updates) free(l.weight_updates);
if(l.delta) free(l.delta);
if(l.output) free(l.output);
if(l.squared) free(l.squared);
if(l.norms) free(l.norms);
if(l.spatial_mean) free(l.spatial_mean);
if(l.mean) free(l.mean);
if(l.variance) free(l.variance);
if(l.mean_delta) free(l.mean_delta);
if(l.variance_delta) free(l.variance_delta);
if(l.rolling_mean) free(l.rolling_mean);
if(l.rolling_variance) free(l.rolling_variance);
if(l.x) free(l.x);
if(l.x_norm) free(l.x_norm);
if(l.m) free(l.m);
if(l.v) free(l.v);
if(l.z_cpu) free(l.z_cpu);
if(l.r_cpu) free(l.r_cpu);
if(l.h_cpu) free(l.h_cpu);
if(l.binary_input) free(l.binary_input);
}
//void free_layer(layer);
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;
typedef struct network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
layer *layers;
float *output;
learning_rate_policy policy;
float learning_rate;
float momentum;
float decay;
float gamma;
float scale;
float power;
int time_steps;
int step;
int max_batches;
float *scales;
int *steps;
int num_steps;
int burn_in;
int adam;
float B1;
float B2;
float eps;
int inputs;
int outputs;
int truths;
int notruth;
int h, w, c;
int max_crop;
int min_crop;
float max_ratio;
float min_ratio;
int center;
float angle;
float aspect;
float exposure;
float saturation;
float hue;
int random;
int gpu_index;
// tree *hierarchy;
float *input;
float *truth;
float *delta;
float *workspace;
int train;
int index;
float *cost;
float clip;
} network;
network *make_network(int n);
layer get_network_output_layer(network *net);
typedef struct {
int w;
int h;
float scale;
float rad;
float dx;
float dy;
float aspect;
} augment_args;
typedef struct {
int w;
int h;
int c;
float *data;
} image;
typedef struct{
float x, y, w, h;
} box;
typedef struct detection{
box bbox;
int classes;
float *prob;
float *mask;
float objectness;
int sort_class;
} detection;
typedef struct matrix{
int rows, cols;
float **vals;
} matrix;
typedef struct{
int w, h;
matrix X;
matrix y;
int shallow;
int *num_boxes;
box **boxes;
} data;
typedef enum {
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
} data_type;
typedef struct load_args{
int threads;
char **paths;
char *path;
int n;
int m;
char **labels;
int h;
int w;
int out_w;
int out_h;
int nh;
int nw;
int num_boxes;
int min, max, size;
int classes;
int background;
int scale;
int center;
int coords;
float jitter;
float angle;
float aspect;
float saturation;
float exposure;
float hue;
data *d;
image *im;
image *resized;
data_type type;
// tree *hierarchy;
} load_args;
typedef struct{
int id;
float x,y,w,h;
float left, right, top, bottom;
} box_label;
//network *load_network(char *cfg, char *weights, int clear);
//load_args get_base_args(network *net);
//void free_data(data d);
typedef struct{
char *key;
char *val;
int used;
} kvp;
typedef struct node{
void *val;
struct node *next;
struct node *prev;
} node;
typedef struct list{
int size;
node *front;
node *back;
} list;
void error(const char *s)
{
perror(s);
assert(0);
exit(-1);
}
void malloc_error()
{
fprintf(stderr, "Malloc error\n");
exit(-1);
}
void file_error(char *s)
{
    fprintf(stderr, "Couldn't open file: %s\n", s);
    exit(1);
}
/////////////////list begin
list *make_list()
{
list *l = (list *)malloc(sizeof(list));
l->size = 0;
l->front = 0;
l->back = 0;
return l;
}
void *list_pop(list *l){
if(!l->back) return 0;
node *b = l->back;
void *val = b->val;
    l->back = b->prev;
    if(l->back) l->back->next = 0;
    else l->front = 0; // popping the last node must also clear front
free(b);
--l->size;
return val;
}
void list_insert(list *l, void *val)
{
node *new_node = (node *)malloc(sizeof(node));
new_node->val = val;
new_node->next = 0;
if(!l->back){
l->front = new_node;
new_node->prev = 0;
}else{
l->back->next = new_node;
new_node->prev = l->back;
}
l->back = new_node;
++l->size;
}
void free_node(node *n)
{
node *next;
while(n) {
next = n->next;
free(n);
n = next;
}
}
void free_list(list *l)
{
free_node(l->front);
free(l);
}
void free_list_contents(list *l)
{
node *n = l->front;
while(n){
free(n->val);
n = n->next;
}
}
void **list_to_array(list *l)
{
void **a = (void **)calloc(l->size, sizeof(void*));
int count = 0;
node *n = l->front;
while(n){
a[count++] = n->val;
n = n->next;
}
return a;
}
/////////////////list end
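/*
   Usage sketch for the list API above (illustrative, not called anywhere):

     list *l = make_list();
     list_insert(l, strdup("first"));
     list_insert(l, strdup("second"));
     char **a = (char **)list_to_array(l);      // front-to-back order
     for(int i = 0; i < l->size; ++i) printf("%s\n", a[i]);
     free(a);                 // frees the array only, not the strings
     free_list_contents(l);   // frees the strdup'ed strings
     free_list(l);            // frees the nodes and the list header
*/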
/////////////////////utils begin
void del_arg(int argc, char **argv, int index)
{
int i;
for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
argv[i] = 0;
}
int find_arg(int argc, char* argv[], char *arg)
{
int i;
for(i = 0; i < argc; ++i) {
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)) {
del_arg(argc, argv, i);
return 1;
}
}
return 0;
}
int find_int_arg(int argc, char **argv, char *arg, int def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atoi(argv[i+1]);
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
float find_float_arg(int argc, char **argv, char *arg, float def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atof(argv[i+1]);
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = argv[i+1];
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
unsigned char *read_file(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
size_t size;
fseek(fp, 0, SEEK_END);
size = ftell(fp);
fseek(fp, 0, SEEK_SET);
    unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char));
    if(fread(text, 1, size, fp) != size)
        fprintf(stderr, "Short read on file: %s\n", filename);
fclose(fp);
return text;
}
list *split_str(char *s, char delim)
{
size_t i;
size_t len = strlen(s);
list *l = make_list();
list_insert(l, s);
for(i = 0; i < len; ++i){
if(s[i] == delim){
s[i] = '\0';
list_insert(l, &(s[i+1]));
}
}
return l;
}
void strip(char *s)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for(i = 0; i < len; ++i){
char c = s[i];
if(c==' '||c=='\t'||c=='\n') ++offset;
else s[i-offset] = c;
}
s[len-offset] = '\0';
}
void strip_char(char *s, char bad)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for(i = 0; i < len; ++i){
char c = s[i];
if(c==bad) ++offset;
else s[i-offset] = c;
}
s[len-offset] = '\0';
}
void free_ptrs(void **ptrs, int n)
{
int i;
for(i = 0; i < n; ++i) free(ptrs[i]);
free(ptrs);
}
char *fgetl(FILE *fp)
{
if(feof(fp)) return 0;
size_t size = 512;
char *line = (char *)malloc(size*sizeof(char));
if(!fgets(line, size, fp)){
free(line);
return 0;
}
size_t curr = strlen(line);
while((line[curr-1] != '\n') && !feof(fp)){
if(curr == size-1){
size *= 2;
line = (char *)realloc(line, size*sizeof(char));
if(!line) {
printf("%ld\n", size);
malloc_error();
}
}
size_t readsize = size-curr;
if(readsize > INT_MAX) readsize = INT_MAX-1;
fgets(&line[curr], readsize, fp);
curr = strlen(line);
}
if(line[curr-1] == '\n') line[curr-1] = '\0';
return line;
}
/////////////////////utils end
////////////////////option_list begin
void option_insert(list *l, char *key, char *val)
{
kvp *p = (kvp *)malloc(sizeof(kvp));
p->key = key;
p->val = val;
p->used = 0;
list_insert(l, p);
}
int read_option(char *s, list *options)
{
size_t i;
size_t len = strlen(s);
char *val = 0;
for(i = 0; i < len; ++i){
if(s[i] == '='){
s[i] = '\0';
val = s+i+1;
break;
}
}
if(i == len-1) return 0;
char *key = s;
option_insert(options, key, val);
return 1;
}
void option_unused(list *l)
{
node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(!p->used){
fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val);
}
n = n->next;
}
}
char *option_find(list *l, char *key)
{
node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(strcmp(p->key, key) == 0){
p->used = 1;
return p->val;
}
n = n->next;
}
return 0;
}
char *option_find_str(list *l, char *key, char *def)
{
char *v = option_find(l, key);
if(v) return v;
if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
return def;
}
int option_find_int(list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
fprintf(stderr, "%s: Using default '%d'\n", key, def);
return def;
}
int option_find_int_quiet(list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
return def;
}
float option_find_float_quiet(list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
return def;
}
float option_find_float(list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
fprintf(stderr, "%s: Using default '%lf'\n", key, def);
return def;
}
list *read_data_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
///////////////////option_list end
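/*
   Usage sketch for the option list above (illustrative; the .data file and
   its keys are hypothetical):

     list *options = read_data_cfg("cfg/coco.data");
     char *names = option_find_str(options, "names", "data/names.list");
     int classes = option_find_int(options, "classes", 80);
     option_unused(options);   // report keys nobody consumed
*/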
image make_empty_image(int w, int h, int c)
{
image out;
out.data = 0;
out.h = h;
out.w = w;
out.c = c;
return out;
}
list *get_paths(char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
image make_image(int w, int h, int c)
{
image out = make_empty_image(w,h,c);
out.data = (float *)calloc(h*w*c, sizeof(float));
return out;
}
static float get_pixel(image m, int x, int y, int c)
{
assert(x < m.w && y < m.h && c < m.c);
return m.data[c*m.h*m.w + y*m.w + x];
}
static void set_pixel(image m, int x, int y, int c, float val)
{
if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
assert(x < m.w && y < m.h && c < m.c);
m.data[c*m.h*m.w + y*m.w + x] = val;
}
static void add_pixel(image m, int x, int y, int c, float val)
{
assert(x < m.w && y < m.h && c < m.c);
m.data[c*m.h*m.w + y*m.w + x] += val;
}
void free_image(image m)
{
if(m.data){
free(m.data);
}
}
image resize_image(image im, int w, int h)
{
image resized = make_image(w, h, im.c);
image part = make_image(w, im.h, im.c);
int r, c, k;
float w_scale = (float)(im.w - 1) / (w - 1);
float h_scale = (float)(im.h - 1) / (h - 1);
for(k = 0; k < im.c; ++k){
for(r = 0; r < im.h; ++r){
for(c = 0; c < w; ++c){
float val = 0;
if(c == w-1 || im.w == 1){
val = get_pixel(im, im.w-1, r, k);
} else {
float sx = c*w_scale;
int ix = (int) sx;
float dx = sx - ix;
val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
}
set_pixel(part, c, r, k, val);
}
}
}
for(k = 0; k < im.c; ++k){
for(r = 0; r < h; ++r){
float sy = r*h_scale;
int iy = (int) sy;
float dy = sy - iy;
for(c = 0; c < w; ++c){
float val = (1-dy) * get_pixel(part, c, iy, k);
set_pixel(resized, c, r, k, val);
}
if(r == h-1 || im.h == 1) continue;
for(c = 0; c < w; ++c){
float val = dy * get_pixel(part, c, iy+1, k);
add_pixel(resized, c, r, k, val);
}
}
}
free_image(part);
return resized;
}
void fill_image(image m, float s)
{
int i;
for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s;
}
void embed_image(image source, image dest, int dx, int dy)
{
int x,y,k;
for(k = 0; k < source.c; ++k){
for(y = 0; y < source.h; ++y){
for(x = 0; x < source.w; ++x){
float val = get_pixel(source, x,y,k);
set_pixel(dest, dx+x, dy+y, k, val);
}
}
}
}
image letterbox_image(image im, int w, int h)
{
int new_w = im.w;
int new_h = im.h;
if (((float)w/im.w) < ((float)h/im.h)) {
new_w = w;
new_h = (im.h * w)/im.w;
} else {
new_h = h;
new_w = (im.w * h)/im.h;
}
image resized = resize_image(im, new_w, new_h);
image boxed = make_image(w, h, im.c);
fill_image(boxed, .5);
//int i;
//for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
free_image(resized);
return boxed;
}
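/* letterbox_image preserves aspect ratio: the image is scaled to the largest
   size that fits inside w x h, then centered on a 0.5-gray canvas, so the
   network always receives a fixed input size with no geometric distortion. */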
image load_image_stb(char *filename, int channels)
{
int w, h, c;
unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
    if (!data) {
        fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
        exit(1);
    }
if(channels) c = channels;
int i,j,k;
image im = make_image(w, h, c);
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int dst_index = i + w*j + w*h*k;
int src_index = k + c*i + c*w*j;
im.data[dst_index] = (float)data[src_index]/255.;
}
}
}
free(data);
return im;
}
void save_image_png(image im, const char *name)
{
char buff[256];
//sprintf(buff, "%s (%d)", name, windows);
sprintf(buff, "%s.png", name);
unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char));
int i,k;
for(k = 0; k < im.c; ++k){
for(i = 0; i < im.w*im.h; ++i){
data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
}
}
int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
free(data);
if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
image **load_alphabet()
{
int i, j;
const int nsize = 8;
    image **alphabets = (image **)calloc(nsize, sizeof(image *));
for(j = 0; j < nsize; ++j){
alphabets[j] = (image *)calloc(128, sizeof(image));
for(i = 32; i < 127; ++i){
char buff[256];
sprintf(buff, "labels/%d_%d.png", i, j);
//alphabets[j][i] = load_image_color(buff, 0, 0);
alphabets[j][i] = load_image_stb(buff, 3);
}
}
return alphabets;
}
///////////////////activation begin
static inline float stair_activate(float x)
{
int n = floor(x);
if (n%2 == 0) return floor(x/2.);
else return (x - n) + floor(x/2.);
}
static inline float hardtan_activate(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
static inline float linear_activate(float x){return x;}
static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
static inline float relu_activate(float x){return x*(x>0);}
static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
static inline float plse_activate(float x)
{
if(x < -4) return .01 * (x + 4);
if(x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
static inline float lhtan_activate(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
static inline float lhtan_gradient(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
static inline float hardtan_gradient(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
static inline float linear_gradient(float x){return 1;}
static inline float logistic_gradient(float x){return (1-x)*x;}
static inline float loggy_gradient(float x)
{
float y = (x+1.)/2.;
return 2*(1-y)*y;
}
static inline float stair_gradient(float x)
{
if (floor(x) == x) return 0;
return 1;
}
static inline float relu_gradient(float x){return (x>0);}
static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
static inline float ramp_gradient(float x){return (x>0)+.1;}
static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
static inline float tanh_gradient(float x){return 1-x*x;}
static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}
char *get_activation_string(ACTIVATION a)
{
switch(a){
case LOGISTIC:
return "logistic";
case LOGGY:
return "loggy";
case RELU:
return "relu";
case ELU:
return "elu";
case RELIE:
return "relie";
case RAMP:
return "ramp";
case LINEAR:
return "linear";
case TANH:
return "tanh";
case PLSE:
return "plse";
case LEAKY:
return "leaky";
case STAIR:
return "stair";
case HARDTAN:
return "hardtan";
case LHTAN:
return "lhtan";
default:
break;
}
return "relu";
}
ACTIVATION get_activation(char *s)
{
if (strcmp(s, "logistic")==0) return LOGISTIC;
if (strcmp(s, "loggy")==0) return LOGGY;
if (strcmp(s, "relu")==0) return RELU;
if (strcmp(s, "elu")==0) return ELU;
if (strcmp(s, "relie")==0) return RELIE;
if (strcmp(s, "plse")==0) return PLSE;
if (strcmp(s, "hardtan")==0) return HARDTAN;
if (strcmp(s, "lhtan")==0) return LHTAN;
if (strcmp(s, "linear")==0) return LINEAR;
if (strcmp(s, "ramp")==0) return RAMP;
if (strcmp(s, "leaky")==0) return LEAKY;
if (strcmp(s, "tanh")==0) return TANH;
if (strcmp(s, "stair")==0) return STAIR;
fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
return RELU;
}
float activate(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate(x);
case LOGISTIC:
return logistic_activate(x);
case LOGGY:
return loggy_activate(x);
case RELU:
return relu_activate(x);
case ELU:
return elu_activate(x);
case RELIE:
return relie_activate(x);
case RAMP:
return ramp_activate(x);
case LEAKY:
return leaky_activate(x);
case TANH:
return tanh_activate(x);
case PLSE:
return plse_activate(x);
case STAIR:
return stair_activate(x);
case HARDTAN:
return hardtan_activate(x);
case LHTAN:
return lhtan_activate(x);
}
return 0;
}
void activate_array(float *x, const int n, const ACTIVATION a)
{
int i;
for(i = 0; i < n; ++i){
x[i] = activate(x[i], a);
}
}
float gradient(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_gradient(x);
case LOGISTIC:
return logistic_gradient(x);
case LOGGY:
return loggy_gradient(x);
case RELU:
return relu_gradient(x);
case ELU:
return elu_gradient(x);
case RELIE:
return relie_gradient(x);
case RAMP:
return ramp_gradient(x);
case LEAKY:
return leaky_gradient(x);
case TANH:
return tanh_gradient(x);
case PLSE:
return plse_gradient(x);
case STAIR:
return stair_gradient(x);
case HARDTAN:
return hardtan_gradient(x);
case LHTAN:
return lhtan_gradient(x);
}
return 0;
}
///////////////////activation end
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
int i;
for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
//printf("shorcut_layer batch=%d,stride=%d,sample=%d\n",batch,stride,sample);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int i,j,k,b;
for(b = 0; b < batch; ++b){
for(k = 0; k < minc; ++k){
for(j = 0; j < minh; ++j){
for(i = 0; i < minw; ++i){
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = s1*out[out_index] + s2*add[add_index];
}
}
}
}
}
void forward_shortcut_layer(const layer l, network net)
{
//copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
//shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
//activate_array(l.output, l.outputs*l.batch, l.activation);
int w = l.w;
int h = l.h;
int c = l.c;
float *add = net.layers[l.index].output;
float *out = l.output;
float *in = net.input;
int i,j,k;
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int index = i + w*(j + h*k );
out[index] = in[index] + add[index];
}
}
}
}
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
layer l;
memset(&l,0,sizeof(layer));
l.type = SHORTCUT;
l.batch = batch;
l.w = w2;
l.h = h2;
l.c = c2;
l.out_w = w;
l.out_h = h;
l.out_c = c;
l.outputs = w*h*c;
l.inputs = l.outputs;
l.index = index;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));
l.forward = forward_shortcut_layer;
return l;
}
int convolutional_out_height(layer l)
{
return (l.h + 2*l.pad - l.size) / l.stride + 1;
}
int convolutional_out_width(layer l)
{
return (l.w + 2*l.pad - l.size) / l.stride + 1;
}
static size_t get_workspace_size(layer l){
return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}
void add_bias(float *output, float *biases, int batch, int n, int size)
{
int i,j,b;
for(b = 0; b < batch; ++b){
for(i = 0; i < n; ++i){
for(j = 0; j < size; ++j){
output[(b*n + i)*size + j] += biases[i];
}
}
}
}
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
int i,j,b;
for(b = 0; b < batch; ++b){
for(i = 0; i < n; ++i){
for(j = 0; j < size; ++j){
output[(b*n + i)*size + j] *= scales[i];
}
}
}
}
float im2col_get_pixel(float *im, int height, int width, int channels,
int row, int col, int channel, int pad)
{
row -= pad;
col -= pad;
if (row < 0 || col < 0 ||
row >= height || col >= width) return 0;
return im[col + width*(row + height*channel)];
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
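// Unroll image patches into a column buffer so convolution becomes a single
// GEMM: data_col has channels*ksize*ksize rows and height_col*width_col
// columns.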
void im2col_cpu(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
int c,h,w;
int height_col = (height + 2*pad - ksize) / stride + 1;
int width_col = (width + 2*pad - ksize) / stride + 1;
int channels_col = channels * ksize * ksize;
for (c = 0; c < channels_col; ++c) {
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = 0; h < height_col; ++h) {
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h * stride;
int im_col = w_offset + w * stride;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
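// C += ALPHA * A*B for row-major, non-transposed operands; the i-k-j loop
// order keeps the inner accesses to B and C sequential in memory.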
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
    #pragma omp parallel for private(j, k)
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[i*lda+k];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
if(!TA && !TB)
gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else if(TA && !TB)
// gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else if(!TA && TB)
// gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else
// gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
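// Inference batch normalization: x = (x - mean[f]) / (sqrt(variance[f]) + eps)
// applied per filter over each spatial plane.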
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int b, f, i;
for(b = 0; b < batch; ++b){
for(f = 0; f < filters; ++f){
for(i = 0; i < spatial; ++i){
int index = b*filters*spatial + f*spatial + i;
x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}
}
}
}
void forward_batchnorm_layer(layer l, network net)//for conv
{
normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}
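// Naive direct convolution with zero padding. Despite the name, no ReLU is
// applied here; activation is handled elsewhere.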
void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding)
{
// (output_w - 1)*Kernel_stride + Kernel_size = Input_w
const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
int x, y, of, inf;
int m,n;
for( of = 0; of < OutFM_num; of++){
for( y = 0; y < output_h; y++) {
for( x = 0; x < output_w; x++){
float tmp = 0.0;
for(inf = 0;inf < InFM_num; inf++)
{
                int input_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding;
for(m = 0;m < Kernel_size; m++)
{
for(n = 0;n < Kernel_size; n++)
{
int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size;
bool inFM_width = ((x*Kernel_stride + n - Padding) >= 0)&&((x*Kernel_stride + n - Padding) < Input_w);
bool inFM_height = ((y*Kernel_stride + m - Padding) >= 0)&&((y*Kernel_stride + m - Padding) < Input_h);
if(inFM_width&&inFM_height)
                            tmp += Weight[kernel_offset + m*Kernel_size + n]*Input[input_offset + m*Input_w + n];
}
}
}
Output[of*output_w*output_h + y*output_w + x] = tmp;
}
}
}
}
layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
int i;
layer l;
memset(&l,0,sizeof(layer));
l.type = CONVOLUTIONAL;
l.groups = groups;
l.h = h;
l.w = w;
l.c = c;
l.n = n;
l.binary = binary;
l.xnor = xnor;
l.batch = batch;
l.stride = stride;
l.size = size;
l.pad = padding;
l.batch_normalize = batch_normalize;
// l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float));
// l.biases = (float *)calloc(n, sizeof(float));
l.nweights = c/groups*n*size*size;
l.nbiases = n;
int out_w = convolutional_out_width(l);
int out_h = convolutional_out_height(l);
l.out_h = out_h;
l.out_w = out_w;
l.out_c = n;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = l.w * l.h * l.c;
// l.output = (float *)calloc(l.batch*l.outputs, sizeof(float));
// l.forward = forward_convolutional_layer;
if(batch_normalize){
// l.scales = (float *)calloc(n, sizeof(float));
// l.rolling_mean = (float *)calloc(n, sizeof(float));
//l.rolling_variance = (float *)calloc(n, sizeof(float));
}
l.workspace_size = get_workspace_size(l);
l.activation = activation;
fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
return l;
}
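// Nearest-neighbor upsampling by integer 'stride' when forward != 0;
// otherwise accumulates values back from 'out' into 'in'.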
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
int i, j, k, b;
for(b = 0; b < batch; ++b){
for(k = 0; k < c; ++k){
for(j = 0; j < h*stride; ++j){
for(i = 0; i < w*stride; ++i){
int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
if(forward) out[out_index] = scale*in[in_index];
else in[in_index] += scale*out[out_index];
}
}
}
}
}
void forward_upsample_layer(const layer l, network net)
{
//fill_cpu(l.outputs*l.batch, 0, l.output, 1);
//upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
int c = l.c;
int h = l.h;
int w = l.w;
int stride = l.stride;
float *in = net.input;
float *out = l.output;
int i, j, k;
for(k = 0; k < c; ++k){
for(j = 0; j < h*stride; ++j){
for(i = 0; i < w*stride; ++i){
int in_index = k*w*h + (j/stride)*w + i/stride;
int out_index = k*w*h*stride*stride + j*w*stride + i;
out[out_index] = in[in_index];
}
}
}
}
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = UPSAMPLE;
l.batch = batch;
l.w = w;
l.h = h;
l.c = c;
l.out_w = w*stride;
l.out_h = h*stride;
l.out_c = c;
if(stride < 0){
stride = -stride;
l.reverse=1;
l.out_w = w/stride;
l.out_h = h/stride;
}
l.stride = stride;
l.outputs = l.out_w*l.out_h*l.out_c;
l.inputs = l.w*l.h*l.c;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));
l.forward = forward_upsample_layer;
if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
return l;
}
void forward_route_layer(const layer l, network net)
{
int i, j;
int offset = 0;
for(i = 0; i < l.n; ++i){
int index = l.input_layers[i];
float *input = net.layers[index].output;
int input_size = l.input_sizes[i];
copy_cpu(input_size, input, 1, l.output + offset, 1);
offset += input_size;
}
}
layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
fprintf(stderr,"route ");
layer l;
memset(&l,0,sizeof(layer));
l.type = ROUTE;
l.batch = batch;
l.n = n;
l.input_layers = input_layers;
l.input_sizes = input_sizes;
int i;
int outputs = 0;
for(i = 0; i < n; ++i){
fprintf(stderr," %d", input_layers[i]);
outputs += input_sizes[i];
}
fprintf(stderr, "\n");
l.outputs = outputs;
l.inputs = outputs;
// l.output = (float *)calloc(outputs*batch, sizeof(float));;
l.forward = forward_route_layer;
return l;
}
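// Maps (batch, location, entry) to a flat offset in a YOLO/region output
// laid out per anchor as w*h planes: x, y, w, h, objectness, then one plane
// per class; 'entry' selects the plane and 'location' encodes (anchor, cell).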
static int entry_index(layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
int loc = location % (l.w*l.h);
return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc;
}
void forward_yolo_layer(const layer l, network net)
{
    int b, n;
//char line[256];
//FILE *fp3;
//char filename[256];
//sprintf(filename, "yolo_layer_%d.txt", l.outputs);
//printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
// if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
//int x;
// for( x = 0; x < l.outputs; x++)
//{
// sprintf(line, "%f\n", net.input[x]);
// if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
// }
// fclose(fp3);
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
for (b = 0; b < l.batch; ++b){
for(n = 0; n < l.n; ++n){
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, 4);
activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
}
}
}
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
int i;
layer l;
memset(&l,0,sizeof(layer));
l.type = YOLO;
l.n = n;
l.total = total;
l.batch = batch;
l.h = h;
l.w = w;
l.c = n*(classes + 4 + 1);
l.out_w = l.w;
l.out_h = l.h;
l.out_c = l.c;
l.classes = classes;
//l.cost = (float *)calloc(1, sizeof(float));
l.biases = (float *)calloc(total*2, sizeof(float));
if(mask) l.mask = mask;
else{
l.mask = (int *)calloc(n, sizeof(int));
for(i = 0; i < n; ++i){
l.mask[i] = i;
}
}
//l.bias_updates = (float *)calloc(n*2, sizeof(float));
l.outputs = h*w*n*(classes + 4 + 1);
l.inputs = l.outputs;
//l.truths = 90*(4 + 1);
//l.delta = (float *)calloc(batch*l.outputs, sizeof(float));
l.output = (float *)calloc(batch*l.outputs, sizeof(float));
for(i = 0; i < total*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_yolo_layer;
fprintf(stderr, "detection\n");
srand(0);
return l;
}
/////////////////praser begin
typedef struct{
char *type;
list *options;
}section;
list *read_cfg(char *filename);
LAYER_TYPE string_to_layer_type(char * type)
{
if (strcmp(type, "[shortcut]")==0) return SHORTCUT;
if (strcmp(type, "[crop]")==0) return CROP;
if (strcmp(type, "[cost]")==0) return COST;
if (strcmp(type, "[detection]")==0) return DETECTION;
if (strcmp(type, "[region]")==0) return REGION;
if (strcmp(type, "[yolo]")==0) return YOLO;
if (strcmp(type, "[local]")==0) return LOCAL;
if (strcmp(type, "[conv]")==0
|| strcmp(type, "[convolutional]")==0) return CONVOLUTIONAL;
if (strcmp(type, "[deconv]")==0
|| strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL;
if (strcmp(type, "[activation]")==0) return ACTIVE;
if (strcmp(type, "[logistic]")==0) return LOGXENT;
if (strcmp(type, "[l2norm]")==0) return L2NORM;
if (strcmp(type, "[net]")==0
|| strcmp(type, "[network]")==0) return NETWORK;
if (strcmp(type, "[crnn]")==0) return CRNN;
if (strcmp(type, "[gru]")==0) return GRU;
if (strcmp(type, "[lstm]") == 0) return LSTM;
if (strcmp(type, "[rnn]")==0) return RNN;
if (strcmp(type, "[conn]")==0
|| strcmp(type, "[connected]")==0) return CONNECTED;
if (strcmp(type, "[max]")==0
|| strcmp(type, "[maxpool]")==0) return MAXPOOL;
if (strcmp(type, "[reorg]")==0) return REORG;
if (strcmp(type, "[avg]")==0
|| strcmp(type, "[avgpool]")==0) return AVGPOOL;
if (strcmp(type, "[dropout]")==0) return DROPOUT;
if (strcmp(type, "[lrn]")==0
|| strcmp(type, "[normalization]")==0) return NORMALIZATION;
if (strcmp(type, "[batchnorm]")==0) return BATCHNORM;
if (strcmp(type, "[soft]")==0
|| strcmp(type, "[softmax]")==0) return SOFTMAX;
if (strcmp(type, "[route]")==0) return ROUTE;
if (strcmp(type, "[upsample]")==0) return UPSAMPLE;
return BLANK;
}
void free_section(section *s)
{
free(s->type);
node *n = s->options->front;
while(n){
kvp *pair = (kvp *)n->val;
free(pair->key);
free(pair);
node *next = n->next;
free(n);
n = next;
}
free(s->options);
free(s);
}
void parse_data(char *data, float *a, int n)
{
int i;
if(!data) return;
char *curr = data;
char *next = data;
int done = 0;
for(i = 0; i < n && !done; ++i){
while(*++next !='\0' && *next != ',');
if(*next == '\0') done = 1;
*next = '\0';
sscanf(curr, "%g", &a[i]);
curr = next+1;
}
}
typedef struct size_params{
int batch;
int inputs;
int h;
int w;
int c;
int index;
int time_steps;
network *net;
} size_params;
layer parse_convolutional(list *options, size_params params)
{
int n = option_find_int(options, "filters",1);
int size = option_find_int(options, "size",1);
int stride = option_find_int(options, "stride",1);
int pad = option_find_int_quiet(options, "pad",0);
int padding = option_find_int_quiet(options, "padding",0);
int groups = option_find_int_quiet(options, "groups", 1);
if(pad) padding = size/2;
char *activation_s = option_find_str(options, "activation", "logistic");
ACTIVATION activation = get_activation(activation_s);
int batch,h,w,c;
h = params.h;
w = params.w;
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before convolutional layer must output image.");
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
int binary = option_find_int_quiet(options, "binary", 0);
int xnor = option_find_int_quiet(options, "xnor", 0);
layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
l.flipped = option_find_int_quiet(options, "flipped", 0);
l.dot = option_find_float_quiet(options, "dot", 0);
return l;
}
int *parse_yolo_mask(char *a, int *num)
{
int *mask = 0;
if(a){
int len = strlen(a);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (a[i] == ',') ++n;
}
mask = (int *)calloc(n, sizeof(int));
for(i = 0; i < n; ++i){
int val = atoi(a);
mask[i] = val;
a = strchr(a, ',')+1;
}
*num = n;
}
return mask;
}
layer parse_yolo(list *options, size_params params)
{
int classes = option_find_int(options, "classes", 20);
int total = option_find_int(options, "num", 1);
int num = total;
char *a = option_find_str(options, "mask", 0);
int *mask = parse_yolo_mask(a, &num);
layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
assert(l.outputs == params.inputs);
l.max_boxes = option_find_int_quiet(options, "max",90);
l.jitter = option_find_float(options, "jitter", .2);
l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
l.truth_thresh = option_find_float(options, "truth_thresh", 1);
l.random = option_find_int_quiet(options, "random", 0);
a = option_find_str(options, "anchors", 0);
if(a){
int len = strlen(a);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (a[i] == ',') ++n;
}
for(i = 0; i < n; ++i){
float bias = atof(a);
l.biases[i] = bias;
a = strchr(a, ',')+1;
}
}
return l;
}
layer parse_shortcut(list *options, size_params params, network *net)
{
char *l = option_find(options, "from");
int index = atoi(l);
if(index < 0) index = params.index + index;
int batch = params.batch;
layer from = net->layers[index];
layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
char *activation_s = option_find_str(options, "activation", "linear");
ACTIVATION activation = get_activation(activation_s);
s.activation = activation;
s.alpha = option_find_float_quiet(options, "alpha", 1);
s.beta = option_find_float_quiet(options, "beta", 1);
return s;
}
layer parse_upsample(list *options, size_params params, network *net)
{
int stride = option_find_int(options, "stride",2);
layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
l.scale = option_find_float_quiet(options, "scale", 1);
return l;
}
layer parse_route(list *options, size_params params, network *net)
{
char *l = option_find(options, "layers");
int len = strlen(l);
if(!l) error("Route Layer must specify input layers");
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (l[i] == ',') ++n;
}
int *layers = (int *)calloc(n, sizeof(int));
int *sizes = (int *)calloc(n, sizeof(int));
for(i = 0; i < n; ++i){
int index = atoi(l);
l = strchr(l, ',')+1;
if(index < 0) index = params.index + index;
layers[i] = index;
sizes[i] = net->layers[index].outputs;
}
int batch = params.batch;
layer route_layer = make_route_layer(batch, n, layers, sizes);
layer first = net->layers[layers[0]];
route_layer.out_w = first.out_w;
route_layer.out_h = first.out_h;
route_layer.out_c = first.out_c;
for(i = 1; i < n; ++i){
int index = layers[i];
layer next = net->layers[index];
if(next.out_w == first.out_w && next.out_h == first.out_h){
route_layer.out_c += next.out_c;
}else{
route_layer.out_h = route_layer.out_w = route_layer.out_c = 0;
}
}
return route_layer;
}
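// Temperature softmax over a strided slice of 'input'; the running maximum
// is subtracted inside exp() for numerical stability.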
void softmax(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int g, b;
for(b = 0; b < batch; ++b){
for(g = 0; g < groups; ++g){
softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
}
}
void forward_region_layer(const layer l, network net)
{
    int b, n;
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
for (b = 0; b < l.batch; ++b){
for(n = 0; n < l.n; ++n){
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, l.coords);
if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
//if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
}
}
if (l.softmax){
int index = entry_index(l, 0, 0, l.coords + !l.background);
softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
}
char line[256];
FILE *fp3;
char filename[256];
sprintf(filename, "yolo_layer_%d.txt", 123123);
printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    if( (fp3 = fopen(filename, "w")) == NULL){
        fprintf(stderr,"CANNOT OPEN\n");
    }else{
        int x;
        for( x = 0; x < l.outputs; x++)
        {
            sprintf(line, "%f\n", net.input[x]);
            if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
        }
        fclose(fp3);
    }
#endif
if(!net.train) return;
}
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = REGION;
l.n = n;
l.batch = batch;
l.h = h;
l.w = w;
l.c = n*(classes + coords + 1);
l.out_w = l.w;
l.out_h = l.h;
l.out_c = l.c;
l.classes = classes;
l.coords = coords;
l.biases = (float *)calloc(n*2, sizeof(float));
l.outputs = h*w*n*(classes + coords + 1);
l.inputs = l.outputs;
l.truths = 30*(l.coords + 1);
l.output = (float *)calloc(batch*l.outputs, sizeof(float));
int i;
for(i = 0; i < n*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_region_layer;
fprintf(stderr, "detection\n");
srand(0);
return l;
}
layer parse_region(list *options, size_params params)
{
int coords = option_find_int(options, "coords", 4);
int classes = option_find_int(options, "classes", 20);
int num = option_find_int(options, "num", 1);
layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
assert(l.outputs == params.inputs);
l.log = option_find_int_quiet(options, "log", 0);
l.sqrt = option_find_int_quiet(options, "sqrt", 0);
l.softmax = option_find_int(options, "softmax", 0);
l.background = option_find_int_quiet(options, "background", 0);
l.max_boxes = option_find_int_quiet(options, "max",30);
l.jitter = option_find_float(options, "jitter", .2);
l.rescore = option_find_int_quiet(options, "rescore",0);
l.thresh = option_find_float(options, "thresh", .5);
l.classfix = option_find_int_quiet(options, "classfix", 0);
l.absolute = option_find_int_quiet(options, "absolute", 0);
l.random = option_find_int_quiet(options, "random", 0);
l.coord_scale = option_find_float(options, "coord_scale", 1);
l.object_scale = option_find_float(options, "object_scale", 1);
l.noobject_scale = option_find_float(options, "noobject_scale", 1);
l.mask_scale = option_find_float(options, "mask_scale", 1);
l.class_scale = option_find_float(options, "class_scale", 1);
l.bias_match = option_find_int_quiet(options, "bias_match",0);
char *tree_file = option_find_str(options, "tree", 0);
// if (tree_file) l.softmax_tree = read_tree(tree_file);
char *map_file = option_find_str(options, "map", 0);
// if (map_file) l.map = read_map(map_file);
char *a = option_find_str(options, "anchors", 0);
if(a){
int len = strlen(a);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (a[i] == ',') ++n;
}
for(i = 0; i < n; ++i){
float bias = atof(a);
l.biases[i] = bias;
a = strchr(a, ',')+1;
}
}
return l;
}
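// Reorg shuffles data between spatial and channel dimensions by 'stride'
// (space-to-depth / depth-to-space), used for YOLOv2's passthrough
// connection.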
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int b,i,j,k;
int out_c = c/(stride*stride);
for(b = 0; b < batch; ++b){
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int in_index = i + w*(j + h*(k + c*b));
int c2 = k % out_c;
int offset = k / out_c;
int w2 = i*stride + offset % stride;
int h2 = j*stride + offset / stride;
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
}
}
}
}
}
void forward_reorg_layer(const layer l, network net)
{
int i;
//if(l.flatten){
// memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
// if(l.reverse){
// flatten(l.output, l.w*l.h, l.c, l.batch, 0);
// }else{
// flatten(l.output, l.w*l.h, l.c, l.batch, 1);
// }
//} else if (l.extra) {
// for(i = 0; i < l.batch; ++i){
// copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
// }
//} else if (l.reverse){
// reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
//} else {
reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
//}
}
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = REORG;
l.batch = batch;
l.stride = stride;
l.extra = extra;
l.h = h;
l.w = w;
l.c = c;
l.flatten = flatten;
if(reverse){
l.out_w = w*stride;
l.out_h = h*stride;
l.out_c = c/(stride*stride);
}else{
l.out_w = w/stride;
l.out_h = h/stride;
l.out_c = c*(stride*stride);
}
l.reverse = reverse;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
if(l.extra){
l.out_w = l.out_h = l.out_c = 0;
l.outputs = l.inputs + l.extra;
}
if(extra){
fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
} else {
fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
}
int output_size = l.outputs * batch;
//l.output = (float *)calloc(output_size, sizeof(float));
l.forward = forward_reorg_layer;
return l;
}
layer parse_reorg(list *options, size_params params)
{
int stride = option_find_int(options, "stride",1);
int reverse = option_find_int_quiet(options, "reverse",0);
int flatten = option_find_int_quiet(options, "flatten",0);
int extra = option_find_int_quiet(options, "extra",0);
int batch,h,w,c;
h = params.h;
w = params.w;
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before reorg layer must output image.");
    layer l = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
    return l;
}
void forward_maxpool_layer(layer l, network net)
{
int b,i,j,k,m,n;
int w_offset = -l.pad;
int h_offset = -l.pad;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
for(b = 0; b < l.batch; ++b){
for(k = 0; k < c; ++k){
for(i = 0; i < h; ++i){
for(j = 0; j < w; ++j){
int out_index = j + w*(i + h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for(n = 0; n < l.size; ++n){
for(m = 0; m < l.size; ++m){
int cur_h = h_offset + i*l.stride + n;
int cur_w = w_offset + j*l.stride + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
float val = (valid != 0) ? net.input[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
l.output[out_index] = max;
l.indexes[out_index] = max_i;
}
}
}
}
}
//layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
//{
// layer l;
// memset(&l,0,sizeof(layer));
// l.type = MAXPOOL;
// l.batch = batch;
// l.h = h;
// l.w = w;
// l.c = c;
// l.pad = padding;
// l.out_w = (w + 2*padding)/stride;
// l.out_h = (h + 2*padding)/stride;
// l.out_c = c;
// l.outputs = l.out_h * l.out_w * l.out_c;
// l.inputs = h*w*c;
// l.size = size;
// l.stride = stride;
// int output_size = l.out_h * l.out_w * l.out_c * batch;
// //l.indexes = (int *)calloc(output_size, sizeof(int));
// //l.output = (float *)calloc(output_size, sizeof(float));
// l.forward = forward_maxpool_layer;
//
// fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
// return l;
//}
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = MAXPOOL;
l.batch = batch;
l.h = h;
l.w = w;
l.c = c;
l.pad = padding;
l.out_w = (w + padding - size)/stride + 1;
l.out_h = (h + padding - size)/stride + 1;
l.out_c = c;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
l.size = size;
l.stride = stride;
int output_size = l.out_h * l.out_w * l.out_c * batch;
//l.indexes = calloc(output_size, sizeof(int));
//l.output = calloc(output_size, sizeof(float));
//l.delta = calloc(output_size, sizeof(float));
l.forward = forward_maxpool_layer;
//l.backward = backward_maxpool_layer;
fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
return l;
}
layer parse_maxpool(list *options, size_params params)
{
int stride = option_find_int(options, "stride",1);
int size = option_find_int(options, "size",stride);
int padding = option_find_int_quiet(options, "padding", size-1);
int batch,h,w,c;
h = params.h;
w = params.w;
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before maxpool layer must output image.");
layer maxpool_layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
return maxpool_layer;
}
learning_rate_policy get_policy(char *s)
{
if (strcmp(s, "random")==0) return RANDOM;
if (strcmp(s, "poly")==0) return POLY;
if (strcmp(s, "constant")==0) return CONSTANT;
if (strcmp(s, "step")==0) return STEP;
if (strcmp(s, "exp")==0) return EXP;
if (strcmp(s, "sigmoid")==0) return SIG;
if (strcmp(s, "steps")==0) return STEPS;
fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
return CONSTANT;
}
void parse_net_options(list *options, network *net)
{
net->batch = option_find_int(options, "batch",1);
net->learning_rate = option_find_float(options, "learning_rate", .001);
net->momentum = option_find_float(options, "momentum", .9);
net->decay = option_find_float(options, "decay", .0001);
int subdivs = option_find_int(options, "subdivisions",1);
net->time_steps = option_find_int_quiet(options, "time_steps",1);
net->notruth = option_find_int_quiet(options, "notruth",0);
net->batch /= subdivs;
net->batch *= net->time_steps;
net->subdivisions = subdivs;
net->random = option_find_int_quiet(options, "random", 0);
net->adam = option_find_int_quiet(options, "adam", 0);
if(net->adam){
net->B1 = option_find_float(options, "B1", .9);
net->B2 = option_find_float(options, "B2", .999);
net->eps = option_find_float(options, "eps", .0000001);
}
net->h = option_find_int_quiet(options, "height",0);
net->w = option_find_int_quiet(options, "width",0);
net->c = option_find_int_quiet(options, "channels",0);
net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
net->center = option_find_int_quiet(options, "center",0);
net->clip = option_find_float_quiet(options, "clip", 0);
net->angle = option_find_float_quiet(options, "angle", 0);
net->aspect = option_find_float_quiet(options, "aspect", 1);
net->saturation = option_find_float_quiet(options, "saturation", 1);
net->exposure = option_find_float_quiet(options, "exposure", 1);
net->hue = option_find_float_quiet(options, "hue", 0);
if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
char *policy_s = option_find_str(options, "policy", "constant");
net->policy = get_policy(policy_s);
net->burn_in = option_find_int_quiet(options, "burn_in", 0);
net->power = option_find_float_quiet(options, "power", 4);
if(net->policy == STEP){
net->step = option_find_int(options, "step", 1);
net->scale = option_find_float(options, "scale", 1);
} else if (net->policy == STEPS){
char *l = option_find(options, "steps");
char *p = option_find(options, "scales");
if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
int len = strlen(l);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (l[i] == ',') ++n;
}
int *steps = (int *)calloc(n, sizeof(int));
float *scales = (float *)calloc(n, sizeof(float));
for(i = 0; i < n; ++i){
int step = atoi(l);
float scale = atof(p);
l = strchr(l, ',')+1;
p = strchr(p, ',')+1;
steps[i] = step;
scales[i] = scale;
}
net->scales = scales;
net->steps = steps;
net->num_steps = n;
} else if (net->policy == EXP){
net->gamma = option_find_float(options, "gamma", 1);
} else if (net->policy == SIG){
net->gamma = option_find_float(options, "gamma", 1);
net->step = option_find_int(options, "step", 1);
} else if (net->policy == POLY || net->policy == RANDOM){
}
net->max_batches = option_find_int(options, "max_batches", 0);
}
int is_network(section *s)
{
return (strcmp(s->type, "[net]")==0
|| strcmp(s->type, "[network]")==0);
}
network *parse_network_cfg(char *filename)
{
list *sections = read_cfg(filename);
node *n = sections->front;
if(!n) error("Config file has no sections");
network *net = make_network(sections->size - 1);
net->gpu_index = -1;
size_params params;
section *s = (section *)n->val;
list *options = s->options;
if(!is_network(s)) error("First section must be [net] or [network]");
parse_net_options(options, net);
params.h = net->h;
params.w = net->w;
params.c = net->c;
params.inputs = net->inputs;
params.batch = net->batch;
params.time_steps = net->time_steps;
params.net = net;
size_t workspace_size = 0;
n = n->next;
int count = 0;
free_section(s);
fprintf(stderr, "layer filters size input output\n");
while(n){
params.index = count;
fprintf(stderr, "%5d ", count);
s = (section *)n->val;
options = s->options;
//layer l = {0};
layer l;
memset(&l,0,sizeof(layer));
LAYER_TYPE lt = string_to_layer_type(s->type);
if(lt == CONVOLUTIONAL){
l = parse_convolutional(options, params);
}else if(lt == YOLO){
l = parse_yolo(options, params);
}else if(lt == ROUTE){
l = parse_route(options, params, net);
}else if(lt == UPSAMPLE){
l = parse_upsample(options, params, net);
}else if(lt == SHORTCUT){
l = parse_shortcut(options, params, net);
}else if(lt == REGION){
l = parse_region(options, params);
}else if(lt == MAXPOOL){
l = parse_maxpool(options, params);
}else if(lt == REORG){
l = parse_reorg(options, params);
}else{
fprintf(stderr, "Type not recognized: %s\n", s->type);
}
l.clip = net->clip;
l.truth = option_find_int_quiet(options, "truth", 0);
l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
l.dontsave = option_find_int_quiet(options, "dontsave", 0);
// l.dontload = option_find_int_quiet(options, "dontload", 0);
// l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
//l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1);
l.smooth = option_find_float_quiet(options, "smooth", 0);
option_unused(options);
net->layers[count] = l;
if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
free_section(s);
n = n->next;
++count;
if(n){
params.h = l.out_h;
params.w = l.out_w;
params.c = l.out_c;
params.inputs = l.outputs;
}
}
free_list(sections);
layer out = get_network_output_layer(net);
net->outputs = out.outputs;
net->output = out.output;
//net->input = (float *)calloc(net->inputs*net->batch, sizeof(float));
    workspace_size = 0; // do not calloc the workspace
//if(workspace_size){
// //printf("%ld\n", workspace_size);
// net->workspace = (float *)calloc(1, workspace_size);
//}
return net;
}
list *read_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
section *current = 0;
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '[':
current = (section *)malloc(sizeof(section));
list_insert(options, current);
current->options = make_list();
current->type = line;
break;
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, current->options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
void load_convolutional_weights(layer l, FILE *fp)
{
int num = l.nweights;
fread(l.biases, sizeof(float), l.n, fp);
if (l.batch_normalize){
fread(l.scales, sizeof(float), l.n, fp);
fread(l.rolling_mean, sizeof(float), l.n, fp);
fread(l.rolling_variance, sizeof(float), l.n, fp);
}
fread(l.weights, sizeof(float), num, fp);
}
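// Reads a darknet .weights file: a header of three ints (major, minor,
// revision) and the 'seen' image counter, followed by the parameters of
// each convolutional layer in network order.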
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
fprintf(stderr, "Loading weights from %s...", filename);
fflush(stdout);
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
int major;
int minor;
int revision;
fread(&major, sizeof(int), 1, fp);
fread(&minor, sizeof(int), 1, fp);
fread(&revision, sizeof(int), 1, fp);
printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
        //fread(net->seen, sizeof(size_t), 1, fp);
        // 'seen' occupies 8 bytes in the file; two reads are used to consume
        // it when size_t is 4 bytes (see the note below)
        fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
}else {
int iseen = 0;
fread(&iseen, sizeof(int), 1, fp);
*net->seen = iseen;
}
//printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
int i;
for(i = start; i < net->n && i < cutoff; ++i){
layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
load_convolutional_weights(l, fp);
}
}
fprintf(stderr, "Done!\n");
fclose(fp);
}
void load_weights(network *net, char *filename)
{
load_weights_upto(net, filename, 0, net->n);
}
/////////////////praser end
/////////////////network begin
load_args get_base_args(network *net)
{
load_args args = {0};
args.w = net->w;
args.h = net->h;
args.size = net->w;
args.min = net->min_crop;
args.max = net->max_crop;
args.angle = net->angle;
args.aspect = net->aspect;
args.exposure = net->exposure;
args.center = net->center;
args.saturation = net->saturation;
args.hue = net->hue;
return args;
}
network *load_network(char *cfg, char *weights, int clear)
{
network *net = parse_network_cfg(cfg);
//if(weights && weights[0] != 0){
// load_weights(net, weights);
//}
if(clear) (*net->seen) = 0;
return net;
}
char *get_layer_string(LAYER_TYPE a)
{
switch(a){
case CONVOLUTIONAL:
return "convolutional";
case ACTIVE:
return "activation";
case LOCAL:
return "local";
case DECONVOLUTIONAL:
return "deconvolutional";
case CONNECTED:
return "connected";
case RNN:
return "rnn";
case GRU:
return "gru";
case LSTM:
return "lstm";
case CRNN:
return "crnn";
case MAXPOOL:
return "maxpool";
case REORG:
return "reorg";
case AVGPOOL:
return "avgpool";
case SOFTMAX:
return "softmax";
case DETECTION:
return "detection";
case REGION:
return "region";
case YOLO:
return "yolo";
case DROPOUT:
return "dropout";
case CROP:
return "crop";
case COST:
return "cost";
case ROUTE:
return "route";
case SHORTCUT:
return "shortcut";
case NORMALIZATION:
return "normalization";
case BATCHNORM:
return "batchnorm";
default:
break;
}
return "none";
}
network *make_network(int n)
{
network *net = (network *)calloc(1, sizeof(network));
net->n = n;
net->layers = (layer *)calloc(net->n, sizeof(layer));
net->seen = (size_t *)calloc(1, sizeof(size_t));
net->t = (int *)calloc(1, sizeof(int));
net->cost = (float *)calloc(1, sizeof(float));
return net;
}
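// Runs all layers in order; each layer's output buffer becomes the input of
// the next layer.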
void forward_network(network *netp)
{
network net = *netp;
int i;
for(i = 0; i < net.n; ++i){
net.index = i;
layer l = net.layers[i];
l.forward(l, net);
net.input = l.output;
// printf("layer [%d]\n",i);
}
}
void set_temp_network(network *net, float t)
{
int i;
for(i = 0; i < net->n; ++i){
net->layers[i].temperature = t;
}
}
void set_batch_network(network *net, int b)
{
net->batch = b;
int i;
for(i = 0; i < net->n; ++i){
net->layers[i].batch = b;
}
}
float *network_predict(network *net, float *input)
{
network orig = *net;
net->input = input;
net->truth = 0;
net->train = 0;
net->delta = 0;
forward_network(net);
float *out = net->output;
*net = orig;
return out;
}
int yolo_num_detections(layer l, float thresh)
{
int i, n;
int count = 0;
for (i = 0; i < l.w*l.h; ++i){
for(n = 0; n < l.n; ++n){
int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
if(l.output[obj_index] > thresh){
++count;
}
}
}
return count;
}
int num_detections(network *net, float thresh)
{
int i;
int s = 0;
for(i = 0; i < net->n; ++i){
layer l = net->layers[i];
if(l.type == YOLO){
s += yolo_num_detections(l, thresh);
}
if(l.type == DETECTION || l.type == REGION){
s += l.w*l.h*l.n;
}
}
return s;
}
detection *make_network_boxes(network *net, float thresh, int *num)
{
layer l = net->layers[net->n - 1];
int i;
int nboxes = num_detections(net, thresh);
//printf("num_detections nboxes = %d\n",nboxes);
if(num) *num = nboxes;
detection *dets = (detection *)calloc(nboxes, sizeof(detection));
for(i = 0; i < nboxes; ++i){
dets[i].prob = (float *)calloc(l.classes, sizeof(float));
}
return dets;
}
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
box b;
b.x = (i + x[index + 0*stride]) / lw;
b.y = (j + x[index + 1*stride]) / lh;
b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
return b;
}
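// Undo the letterbox transform: boxes predicted in network coordinates
// (netw x neth with aspect-preserving padding) are mapped back to the
// original w x h image; with relative == 0 they are also scaled to pixels.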
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
int i;
int new_w=0;
int new_h=0;
if (((float)netw/w) < ((float)neth/h)) {
new_w = netw;
new_h = (h * netw)/w;
} else {
new_h = neth;
new_w = (w * neth)/h;
}
for (i = 0; i < n; ++i){
box b = dets[i].bbox;
b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
b.w *= (float)netw/new_w;
b.h *= (float)neth/new_h;
if(!relative){
b.x *= w;
b.w *= w;
b.y *= h;
b.h *= h;
}
dets[i].bbox = b;
}
}
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
int i,j,n;
float *predictions = l.output;
// if (l.batch == 2) avg_flipped_yolo(l);
int count = 0;
for (i = 0; i < l.w*l.h; ++i){
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float objectness = predictions[obj_index];
if(objectness <= thresh) continue;
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
dets[count].objectness = objectness;
dets[count].classes = l.classes;
for(j = 0; j < l.classes; ++j){
int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
float prob = objectness*predictions[class_index];
dets[count].prob[j] = (prob > thresh) ? prob : 0;
}
++count;
}
}
correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
return count;
}
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
box b;
b.x = (i + x[index + 0*stride]) / w;
b.y = (j + x[index + 1*stride]) / h;
b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
return b;
}
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
int i;
int new_w=0;
int new_h=0;
if (((float)netw/w) < ((float)neth/h)) {
new_w = netw;
new_h = (h * netw)/w;
} else {
new_h = neth;
new_w = (w * neth)/h;
}
for (i = 0; i < n; ++i){
box b = dets[i].bbox;
b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
b.w *= (float)netw/new_w;
b.h *= (float)neth/new_h;
if(!relative){
b.x *= w;
b.w *= w;
b.y *= h;
b.h *= h;
}
dets[i].bbox = b;
}
}
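// Decode region-layer detections. With batch == 2 the second half of the
// output holds a horizontally flipped pass: it is un-flipped (x offsets
// negated) and averaged with the first before decoding.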
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
int i,j,n,z;
float *predictions = l.output;
if (l.batch == 2) {
float *flip = l.output + l.outputs;
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w/2; ++i) {
for (n = 0; n < l.n; ++n) {
for(z = 0; z < l.classes + l.coords + 1; ++z){
int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
float swap = flip[i1];
flip[i1] = flip[i2];
flip[i2] = swap;
if(z == 0){
flip[i1] = -flip[i1];
flip[i2] = -flip[i2];
}
}
}
}
}
for(i = 0; i < l.outputs; ++i){
l.output[i] = (l.output[i] + flip[i])/2.;
}
}
for (i = 0; i < l.w*l.h; ++i){
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int index = n*l.w*l.h + i;
for(j = 0; j < l.classes; ++j){
dets[index].prob[j] = 0;
}
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float scale = l.background ? 1 : predictions[obj_index];
dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
dets[index].objectness = scale > thresh ? scale : 0;
if(dets[index].mask){
for(j = 0; j < l.coords - 4; ++j){
dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
}
}
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
if(dets[index].objectness){
for(j = 0; j < l.classes; ++j){
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
}
}
correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
int j;
for(j = 0; j < net->n; ++j){
layer l = net->layers[j];
if(l.type == YOLO){
int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets);
dets += count;
}
if(l.type == REGION){
get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
dets += l.w*l.h*l.n;
}
}
}
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
detection *dets = make_network_boxes(net, thresh, num);
fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
return dets;
}
void free_detections(detection *dets, int n)
{
int i;
for(i = 0; i < n; ++i){
free(dets[i].prob);
if(dets[i].mask) free(dets[i].mask);
}
free(dets);
}
int network_width(network *net){return net->w;}
int network_height(network *net){return net->h;}
layer get_network_output_layer(network *net)
{
int i;
for(i = net->n - 1; i >= 0; --i){
if(net->layers[i].type != COST) break;
}
return net->layers[i];
}
void free_network(network *net)
{
int i;
for(i = 0; i < net->n; ++i){
free_layer(net->layers[i]);
}
free(net->layers);
if(net->input) free(net->input);
if(net->truth) free(net->truth);
free(net);
}
layer network_output_layer(network *net)
{
int i;
for(i = net->n - 1; i >= 0; --i){
if(net->layers[i].type != COST) break;
}
return net->layers[i];
}
int network_inputs(network *net)
{
return net->layers[0].inputs;
}
int network_outputs(network *net)
{
return network_output_layer(net).outputs;
}
float *network_output(network *net)
{
return network_output_layer(net).output;
}
//////////////////network end
//////////////////////box begin
int nms_comparator(const void *pa, const void *pb)
{
detection a = *(detection *)pa;
detection b = *(detection *)pb;
float diff = 0;
if(b.sort_class >= 0){
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
} else {
diff = a.objectness - b.objectness;
}
if(diff < 0) return 1;
else if(diff > 0) return -1;
return 0;
}
float overlap(float x1, float w1, float x2, float w2)
{
float l1 = x1 - w1/2;
float l2 = x2 - w2/2;
float left = l1 > l2 ? l1 : l2;
float r1 = x1 + w1/2;
float r2 = x2 + w2/2;
float right = r1 < r2 ? r1 : r2;
return right - left;
}
float box_intersection(box a, box b)
{
float w = overlap(a.x, a.w, b.x, b.w);
float h = overlap(a.y, a.h, b.y, b.h);
if(w < 0 || h < 0) return 0;
float area = w*h;
return area;
}
float box_union(box a, box b)
{
float i = box_intersection(a, b);
float u = a.w*a.h + b.w*b.h - i;
return u;
}
float box_iou(box a, box b)
{
return box_intersection(a, b)/box_union(a, b);
}
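// Per-class non-maximum suppression: detections with zero objectness are
// swapped to the tail, then for each class the rest are sorted by that
// class's probability and any lower-ranked box with IoU above 'thresh'
// against a kept box has its probability zeroed.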
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
int i, j, k;
k = total-1;
for(i = 0; i <= k; ++i){
if(dets[i].objectness == 0){
detection swap = dets[i];
dets[i] = dets[k];
dets[k] = swap;
--k;
--i;
}
}
total = k+1;
for(k = 0; k < classes; ++k){
for(i = 0; i < total; ++i){
dets[i].sort_class = k;
}
qsort(dets, total, sizeof(detection), nms_comparator);
for(i = 0; i < total; ++i){
if(dets[i].prob[k] == 0) continue;
box a = dets[i].bbox;
for(j = i+1; j < total; ++j){
box b = dets[j].bbox;
if (box_iou(a, b) > thresh){
dets[j].prob[k] = 0;
}
}
}
}
}
//////////////////////box end
//////////////////////image begin
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
float get_color(int c, int x, int max)
{
float ratio = ((float)x/max)*5;
int i = floor(ratio);
int j = ceil(ratio);
ratio -= i;
float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
//printf("%f\n", r);
return r;
}
static float get_pixel_extend(image m, int x, int y, int c)
{
if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
/*
if(x < 0) x = 0;
if(x >= m.w) x = m.w-1;
if(y < 0) y = 0;
if(y >= m.h) y = m.h-1;
*/
if(c < 0 || c >= m.c) return 0;
return get_pixel(m, x, y, c);
}
void composite_image(image source, image dest, int dx, int dy)
{
int x,y,k;
for(k = 0; k < source.c; ++k){
for(y = 0; y < source.h; ++y){
for(x = 0; x < source.w; ++x){
float val = get_pixel(source, x, y, k);
float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
set_pixel(dest, dx+x, dy+y, k, val * val2);
}
}
}
}
image border_image(image a, int border)
{
image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
int x,y,k;
for(k = 0; k < b.c; ++k){
for(y = 0; y < b.h; ++y){
for(x = 0; x < b.w; ++x){
float val = get_pixel_extend(a, x - border, y - border, k);
if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
set_pixel(b, x, y, k, val);
}
}
}
return b;
}
image copy_image(image p)
{
image copy = p;
copy.data = (float *)calloc(p.h*p.w*p.c, sizeof(float));
memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
return copy;
}
image tile_images(image a, image b, int dx)
{
if(a.w == 0) return copy_image(b);
image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
embed_image(a, c, 0, 0);
composite_image(b, c, a.w + dx, 0);
return c;
}
image get_label(image **characters, char *string, int size)
{
size = size/10;
if(size > 7) size = 7;
image label = make_empty_image(0,0,0);
while(*string){
image l = characters[size][(int)*string];
image n = tile_images(label, l, -size - 1 + (size+1)/2);
free_image(label);
label = n;
++string;
}
image b = border_image(label, label.h*.25);
free_image(label);
return b;
}
void draw_label(image a, int r, int c, image label, const float *rgb)
{
int w = label.w;
int h = label.h;
if (r - h >= 0) r = r - h;
int i, j, k;
for(j = 0; j < h && j + r < a.h; ++j){
for(i = 0; i < w && i + c < a.w; ++i){
for(k = 0; k < label.c; ++k){
float val = get_pixel(label, i, j, k);
set_pixel(a, i+c, j+r, k, rgb[k] * val);
}
}
}
}
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
//normalize_image(a);
int i;
if(x1 < 0) x1 = 0;
if(x1 >= a.w) x1 = a.w-1;
if(x2 < 0) x2 = 0;
if(x2 >= a.w) x2 = a.w-1;
if(y1 < 0) y1 = 0;
if(y1 >= a.h) y1 = a.h-1;
if(y2 < 0) y2 = 0;
if(y2 >= a.h) y2 = a.h-1;
for(i = x1; i <= x2; ++i){
a.data[i + y1*a.w + 0*a.w*a.h] = r;
a.data[i + y2*a.w + 0*a.w*a.h] = r;
a.data[i + y1*a.w + 1*a.w*a.h] = g;
a.data[i + y2*a.w + 1*a.w*a.h] = g;
a.data[i + y1*a.w + 2*a.w*a.h] = b;
a.data[i + y2*a.w + 2*a.w*a.h] = b;
}
for(i = y1; i <= y2; ++i){
a.data[x1 + i*a.w + 0*a.w*a.h] = r;
a.data[x2 + i*a.w + 0*a.w*a.h] = r;
a.data[x1 + i*a.w + 1*a.w*a.h] = g;
a.data[x2 + i*a.w + 1*a.w*a.h] = g;
a.data[x1 + i*a.w + 2*a.w*a.h] = b;
a.data[x2 + i*a.w + 2*a.w*a.h] = b;
}
}
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
int i;
for(i = 0; i < w; ++i){
draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
}
}
image float_to_image(int w, int h, int c, float *data)
{
image out = make_empty_image(w,h,c);
out.data = data;
return out;
}
image threshold_image(image im, float thresh)
{
int i;
image t = make_image(im.w, im.h, im.c);
for(i = 0; i < im.w*im.h*im.c; ++i){
t.data[i] = im.data[i]>thresh ? 1 : 0;
}
return t;
}
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
int i,j;
for(i = 0; i < num; ++i){
char labelstr[4096] = {0};
int class_t = -1;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j] > thresh){
if (class_t < 0) {
strcat(labelstr, names[j]);
class_t = j;
} else {
strcat(labelstr, ", ");
strcat(labelstr, names[j]);
}
printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
}
}
if(class_t >= 0){
int width = im.h * .006;
//printf("%d %s: %.0f%%\n", i, names[class], prob*100);
int offset = class_t*123457 % classes;
float red = get_color(2,offset,classes);
float green = get_color(1,offset,classes);
float blue = get_color(0,offset,classes);
float rgb[3];
//width = prob*20+2;
rgb[0] = red;
rgb[1] = green;
rgb[2] = blue;
box b = dets[i].bbox;
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
int left = (b.x-b.w/2.)*im.w;
int right = (b.x+b.w/2.)*im.w;
int top = (b.y-b.h/2.)*im.h;
int bot = (b.y+b.h/2.)*im.h;
if(left < 0) left = 0;
if(right > im.w-1) right = im.w-1;
if(top < 0) top = 0;
if(bot > im.h-1) bot = im.h-1;
draw_box_width(im, left, top, right, bot, width, red, green, blue);
if (alphabet) {
image label = get_label(alphabet, labelstr, (im.h*.03));
draw_label(im, top + width, left, label, rgb);
free_image(label);
}
if (dets[i].mask){
image mask = float_to_image(14, 14, 1, dets[i].mask);
image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
image tmask = threshold_image(resized_mask, .5);
embed_image(tmask, im, left, top);
free_image(mask);
free_image(resized_mask);
free_image(tmask);
}
}
}
}
//////////////////////image end
///////////////////////////////////////////////////////////////////// 20181229 notes (start): reorg WeightQ/BetaQ/InputQ verified; input opt ok (input_g eval op1 had no effect, layer1 still 0.14s); relu opt ok; output opt ok; weight opt ok; out4 failed (gg); (4) n4m32i1o1 ok
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
#define S 2
#define K 3
#define Tn 4
#define Tm 32
#define Tr 26
#define Tc 26
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define MAX_BETA_LENGTH (1024)
#define INTERWIDTH 20
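// Host-side driver for the FPGA-accelerated YOLOv2 forward pass. Activations,
// weights and biases are 16-bit fixed point, packed two values per 32-bit int;
// the pre-quantized parameters and their per-layer Q formats are loaded from
// .bin files. Conv/maxpool/reorg layers are tiled with Tr/Tc/Tm/Tn and
// dispatched to YOLO2_FPGA (the HLS accelerator entry point, not defined in
// this file); the final feature map is unpacked to float and fed through the
// CPU region layer.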
void yolov2_hls_ps(network *net, float *input)
{
int x;
network orig = *net;
net->input = input;
int weight_offset[32] = {864, 18432, 73728, 8192, 73728,
294912, 32768, 294912, 1179648, 131072, 1179648, 131072,
1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184,
9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024,
512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int offset_index = 0;
int *Weight_buf = (int *)calloc(203767168/4/2,sizeof(int));
int *Beta_buf = (int *)calloc((43044+4)/4/2,sizeof(int));
FILE *fp_w = fopen("weightsv2_comb_reorg_ap16.bin", "rb");
if(!fp_w) file_error("weightsv2_comb_reorg_ap16.bin");
FILE *fp_b = fopen("biasv2_comb_ap16.bin", "rb");
if(!fp_b) file_error("biasv2_comb_ap16.bin");
fread(Weight_buf, sizeof(int), 203767168/4/2, fp_w);
fread(Beta_buf, sizeof(int), (43044+4)/4/2, fp_b);
fclose(fp_w);
fclose(fp_b);
#define QNUM 23
int inputQ[QNUM+1];
int weightQ[QNUM];
int betaQ[QNUM];
FILE *Qin;
Qin = fopen("yolov2_ap16_inout_maxQ_24.bin","rb");
if(!Qin) file_error("Qin error 1\n");
fread(inputQ,sizeof(int),QNUM+1,Qin);
fclose(Qin);
if(inputQ[20] < inputQ[21])
inputQ[21] = inputQ[20];
else
inputQ[20] = inputQ[21];
for(x=0;x<QNUM+1;x++)
printf("[%2d inputQ]=%2d\n",x,inputQ[x]);
Qin = fopen("weightsv2_comb_reorg_ap16_maxQ_23.bin","rb");
if(!Qin) file_error("Qin error 2\n");
fread(weightQ,sizeof(int),QNUM,Qin);
fclose(Qin);
for(x=0;x<QNUM;x++)
printf("[%2d weightQ]=%2d\n",x,weightQ[x]);
Qin = fopen("biasv2_comb_ap16_maxQ_23.bin","rb");
if(!Qin) file_error("Qin error 4\n");
fread(betaQ,sizeof(int),QNUM,Qin);
fclose(Qin);
for(x=0;x<QNUM;x++)
printf("[%2d betaQ]=%2d\n",x,betaQ[x]);
#define MEM_LEN (416*416*32/2+208*208*32/2)
int *Memory_buf = (int*)calloc(MEM_LEN+1024/2+1024/2,sizeof(int));
int *Memory_top = Memory_buf+1024/2;
int *Memory_bottom = Memory_top + MEM_LEN;
int tmp_in;
short current_in,next_in;
bool NextPixelInFlag = true;
int InputPixelOffset = 0;
int *Input_ptr = (int *)Memory_top;
for(x=0;x<416*416*3;x++)//1st Layer input Q14
{
if(NextPixelInFlag)
{
current_in = (short)(input[x]*pow(2.0,14));
NextPixelInFlag = false;
}
else
{
next_in = (short)(input[x]*pow(2.0,14));
tmp_in = (next_in<<16) + (current_in);
Input_ptr[InputPixelOffset] = tmp_in;
InputPixelOffset++;
NextPixelInFlag = true;
}
}
    float *region_buf = (float *)calloc(13*13*425,sizeof(float));
int* in_ptr[32];
int* out_ptr[32];
#define ROUTE16_LEN (26*26*512/2)
#define CONV27_LEN (13*13*256/2)
#define CONV24_LEN (13*13*1024/2)
for(x=0;x<18;x++)
{
if(x%2==0)
{
in_ptr[x] = Memory_top;
out_ptr[x] = Memory_bottom - net->layers[x].outputs/2 ;
}
else
{
in_ptr[x] = out_ptr[x-1];
out_ptr[x] = Memory_top;
}
}
for(x=18;x<25;x++)
{
if(x%2==0)
{
in_ptr[x] = Memory_top;
out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs/2;
}else
{
in_ptr[x] = out_ptr[x-1];
out_ptr[x] = Memory_top;
}
}
in_ptr[26] = Memory_bottom - ROUTE16_LEN;
out_ptr[26] = Memory_top;
in_ptr[27] = Memory_top;
out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;
in_ptr[29] = out_ptr[27];
out_ptr[29] = Memory_top;
in_ptr[30] = Memory_top;
out_ptr[30] = Memory_bottom - (net->layers[30].outputs + 13*13*3)/2;
in_ptr[31] = out_ptr[30];
network netp = *net;
int i;
int woffset = 0;
int aoffset = 0;
int boffset = 0;
int TR,TC,TM,TN;
int output_w,output_h;
int rLoops,cLoops,mLoops,nLoops;
double sum_gop = 0.0;
int INPUTQ;
unsigned char TRow;
int trow_loops;
int T2Rate;
for(i = 0; i < netp.n; ++i)
{
netp.index = i;
layer l = netp.layers[i];
printf("Layer[%2d]: ",i);
switch(l.type)
{
case CONVOLUTIONAL:
printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
sum_gop += (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.;
output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
TR = MIN(output_h,TR);
TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
TC = MIN(output_w,TC);
TM = MIN(l.n,Tm);
TN = MIN(l.c,Tn);
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = (int)ceil(((float)l.n)/TM);
nLoops = (int)ceil(((float)l.c)/TN);
switch(l.w)
{
case 26:
T2Rate = 2;
break;
case 13:
T2Rate = 4;
break;
default:
T2Rate = 1;
break;
}
TRow = (TR-1)*l.stride+l.size;
trow_loops = (int)ceil(((float)TRow/T2Rate));
INPUTQ = inputQ[offset_index];
if(i==26)
INPUTQ = inputQ[12];
YOLO2_FPGA(in_ptr[i],in_ptr[i],in_ptr[i],in_ptr[i],out_ptr[i],out_ptr[i],Weight_buf+woffset/2,Beta_buf+boffset/2,
l.c,l.n,l.size,
l.stride,l.w,l.h,output_w,output_h,
l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0,
TM,TN,TR,TC,
mLoops,nLoops,rLoops,cLoops,0,
INPUTQ,inputQ[offset_index+1],weightQ[offset_index],betaQ[offset_index],trow_loops);
printf("TR=%d,TC=%d,TM=%d,TN=%d,rLoops=%d,cLoops=%d,mLoops=%d,nLoops=%d\n",TR,TC,TM,TN,rLoops,cLoops,mLoops,nLoops);
woffset += weight_offset[offset_index];
boffset += beta_offset[offset_index];
offset_index++;
break;
case MAXPOOL:
printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
//output_w = (l.w - l.size)/l.stride + 1 ;
//output_h = (l.h - l.size)/l.stride + 1 ;
output_w = l.out_w;
output_h = l.out_h;
TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
TR = MIN(output_h,TR);
TC = MIN(output_w,TC);
TM = MIN(Tm,Tn);
TM = MIN(l.c,TM);
TN = TM;
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = (int)ceil(((float)l.c)/TM);
switch(l.w)
{
case 26:
T2Rate = 2;
break;
case 13:
T2Rate = 4;
break;
default:
T2Rate = 1;
break;
}
TRow = (TR-1)*l.stride+l.size;
trow_loops = (int)ceil(((float)TRow/T2Rate));
YOLO2_FPGA(in_ptr[i],in_ptr[i],in_ptr[i],in_ptr[i],out_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c,
l.size,l.stride,l.w,l.h,output_w,output_h,
0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,1,
inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH,trow_loops);
break;
case REORG:
printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
output_w = 26;
output_h = 32*13;
TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
TR = MIN(output_h,TR);
TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
TC = MIN(output_w,TC);
TM = 4;
TN = TM;
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = 1;
T2Rate = 1; // the original switch(52) always hit default: the 26/13 fast paths never apply to the 52-wide reorg input
TRow = (TR-1)*l.stride+l.stride;
trow_loops = (int)ceil(((float)TRow/T2Rate));
YOLO2_FPGA(in_ptr[i],in_ptr[i],in_ptr[i],in_ptr[i],out_ptr[i],out_ptr[i],NULL,NULL,1,4,
l.stride,l.stride,52,32*26,output_w,output_h,
0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,2,
inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH,trow_loops);
break;
case ROUTE:
printf("outputMemory:%8d;route ",l.outputs);
int j;
for(j = 0; j < l.n; ++j){
printf(" %d", l.input_layers[j]);
}
printf("\n");
break;
case REGION:
printf("outputMemory:%8d;Detection\n",l.outputs);
//netp.input = in_ptr[i];
double LastLayerOutputPara = pow(2.0,-inputQ[QNUM]);
bool NextPixelFlag = true;
int OutputPixelOffset = 0;
short current_p,next_p,output_p;
int *Output_ptr = (int *)(in_ptr[i]);
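// Inverse of the input packing: each 32-bit word holds two 16-bit fixed-point
// outputs, dequantized to float by 2^-inputQ[QNUM] into region_buf.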
for(j=0;j<l.outputs;j++)
{
if(NextPixelFlag)
{
int tmp_p = Output_ptr[OutputPixelOffset];
OutputPixelOffset++;
current_p = tmp_p;
next_p = tmp_p >> 16;
output_p = current_p;
NextPixelFlag = false;
}else
{
output_p = next_p;
NextPixelFlag = true;
}
region_buf[j] = output_p*LastLayerOutputPara;
}
netp.input = region_buf;
forward_region_layer(l,netp);
break;
}
netp.input = l.output;
}
printf("SUM_GOP=%g\n",sum_gop);
*net = orig;
free(region_buf);
free(Memory_buf);
free(Weight_buf);
free(Beta_buf);
}
// 2018-12-29 dev note: reorg WeightQ/BetaQ ok, InputQ ok, end-of-input opt ok, input_g eval op1 // (4) n4m32i1o1 ok
#endif
SymbolicDerivatives.h
#ifndef _SymbolicDerivatives_H_
#define _SymbolicDerivatives_H_
using namespace std;
#ifdef _OPENMP
#include <omp.h>
#endif
#define WITH_MMVII false
#define WITH_EIGEN false
#if WITH_EIGEN
#include "ExternalInclude/Eigen/Dense" // TODO => replace with standard eigen file
#define EIGEN_ALLIGNMENT_IN_MMVII EIGEN_MAKE_ALIGNED_OPERATOR_NEW
#else
#define EIGEN_ALLIGNMENT_IN_MMVII
#endif
/** \file SymbolicDerivatives.h
\brief File for generating symbolic derivatives
Classes for generating symbolic derivatives. All classes are single template classes.
The template parameter indicates the numerical type used for storage/computation
(float, double ...)
This file is the only file to include. It contains :
* declaration of operators
* definition of the "main" classes : cFormula , cCoordinatorF , cImplemF ;
* the 3 classes for atomic formulas that will (probably) stay the same : Unknown, Observation, Constant
This file includes 2 files corresponding to the following types of formula :
* classes for "unary" formulas in "SymbDer_UnaryOp.h"
* classes for "binary" formulas in "SymbDer_BinaryOp.h"
These 2 files are meant to be extended in the future.
-------------------------------------------------
* cFormula<Type> : represents a mathematical formula; as in math :
- if F is a formula, exp(F), log(F) .... are formulas
- if F1 and F2 are formulas, F1+F2 , F1*F2 ... are formulas
- there exist some atomic formulas like constants, unknowns and observations
- if F is a formula, F->Derivate(k) is a formula corresponding to its derivative dF/dXk
Formulas are a complete algebraic type.
* cCoordinatorF<Type> : is the "coordinator" class.
This class has, among others, the responsibility of :
- creating the initial atomic formulas corresponding to unknowns and observations
- maintaining an inventory of existing formulas for efficiency purposes
* Using this library mainly means :
- creating a coordinator with a given number of unknowns and observations
- creating a formula using atoms and operators; generally the user function creating a
formula will be a template that can operate on any complete algebraic type
(double, float, Formula, jets ...)
- indicating to the coordinator the formulas you want to work on, generally with their derivatives
- evaluating the values of the formulas for given unknowns and observations
cFormula<Type> is no more than an encapsulation of a pointer to the "concrete" class cImplemF.
* cImplemF<Type> : is the mother class of all formulas. It is a pure abstract class containing
several pure virtual methods. The two main methods are "Derivate" and "ComputeBuf"; these are
the two methods users will have to define when extending the library with a new
operator.
- cFormula<Type> Derivate(int aK) returns the formula of its derivative w.r.t. Xk. Here are
two examples extracted from the code, one for multiplication, the other for unknowns :
o return mF2*mF1->Derivate(aK) + mF1*mF2->Derivate(aK); // From cMulF : (FG)' = F'G + FG'
o return (aK==mNumUnk) ? tImplemF::mCoordF->Cste1() : tImplemF::mCoordF->Cste0(); // from cUnknownF
- void ComputeBuf(int aK0,int aK1) : updates the buffer of its data once its subformulas have
been updated; this is the method that does the real job. Here are extracts from cExpF and cDivF :
o for (int aK=aK0 ; aK<aK1 ; aK++) mDataBuf[aK] = std::exp(mDataF[aK]);
o for (int aK=aK0 ; aK<aK1 ; aK++) mDataBuf[aK] = mDataF1[aK] / mDataF2[aK];
*/
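/* A minimal end-to-end usage sketch (illustrative only; it assumes the
cCalculator base in "SymbDer_Common.h" provides the evaluation entry points
hinted at further below, e.g. PushNewEvals):
using namespace NS_SymbolicDerivative;
cCoordinatorF<double> aCoord("MyRes",100,2,1); // buffer of 100, 2 unknowns, 1 observation
const auto & aVX = aCoord.VUk(); // atomic formulas X0, X1
const auto & aVV = aCoord.VObs(); // atomic formula V0
cFormula<double> aRes = square(aVX[0]) + exp(aVX[1]) * aVV[0];
aCoord.SetCurFormulasWithDerivative({aRes}); // will evaluate Res, dRes/dX0, dRes/dX1
*/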
#include "SymbDer_Common.h"
#if (WITH_MMVII)
#include "include/MMVII_all.h"
#include "include/MMVII_Derivatives.h"
using namespace MMVII;
#else //========================================================== WITH_MMVI
class cMemCheck
{
};
#include <memory>
#include <map>
#include <iostream>
#include <cassert>
#include "memory.h"
#include <fstream>
#include <string>
#include <typeinfo>
#include <vector>
#include <list>
#include <ctime>
#include <chrono>
#include <math.h>
#include <cmath>
#include <algorithm>
#include <sstream>
#include <iomanip>
#endif //========================================================== WITH_MMVI
// REDUCTION RULES
// TODO => REPLACE BY METHOD ON COORDINATOR WHEN THEY IMPROVE THINGS ....
#define DOREDUCE false
#define REDUCE_CSTE true // Cste+Cste => cste
#define REDUCE_MM DOREDUCE // - - x => x ; a-(-b) => a+b
#define REDUCE_ASSOCP DOREDUCE /* B + (A + C) = > A + ( B + C),
more generally order the + operator, could be done with '*' */
#define REDUCE_DISTRIB DOREDUCE // A#B ~ A#C=> A#(B~C) ; # in "*/" and ~ in "+-"
#define REDUCE_ApA DOREDUCE // A+A => 2*A, not good by itself, but may create other reductions
#define REDUCE_DIST1 DOREDUCE // A + A*C => A*(1+C) if C is a constant, to keep all constants close together
static inline void SHOW_REDUCE(const std::string & aMes) {} // std::cout << "REDUCE " << aMes << "\n";}
namespace NS_SymbolicDerivative
{
/* *************************************************** */
/* */
/* P0-Definition of global functions */
/* */
/* *************************************************** */
/// CreateCste is required for formulas, so we also need it on numeric types
template <class Type> inline Type CreateCste(const Type & aV,const Type &) { return aV; }
/// because pow is defined in std and the int->float cast would make it inaccessible
template <class Type> inline Type pow(const Type & aV,const int & aExp)
{
return std::pow(aV,Type(aExp));
}
//============= BASIC ERROR HANDLING ==============
/** This function computes derivatives by finite differences.
It is used in the tests to check correctness of symbolic derivatives. Also used
in didactic parts.
*/
template <class Type,class TypeFct>
std::vector<Type> NumericalDerivate
(
TypeFct & aFctr, ///< Function
const std::vector<Type> & aVUk, ///< Unknown
const std::vector<Type> & aVObs, ///< Observations
int aNumVar, ///< Num of unknown we derivate by
const Type & aEpsilon ///< "Small" number to compute variations
)
{
std::vector<Type> aVPlus = aVUk;
aVPlus.at(aNumVar) += aEpsilon;
std::vector<Type> aResPlus = aFctr( aVPlus,aVObs);
std::vector<Type> aVMinus = aVUk;
aVMinus.at(aNumVar) -= aEpsilon;
std::vector<Type> aResMinus = aFctr( aVMinus,aVObs);
std::vector<Type> aDerivate;
for (size_t aK=0 ; aK<aResPlus.size() ; aK++)
aDerivate.push_back((aResPlus.at(aK)-aResMinus.at(aK)) / (2*aEpsilon));
return aDerivate;
}
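/* Usage sketch: compare a symbolic derivative w.r.t. unknown 0 with its
central-difference estimate (F(x+eps)-F(x-eps))/(2*eps), as computed above:
std::vector<double> aNum = NumericalDerivate(aFctr, aVUk, aVObs, 0, 1e-6);
*/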
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * Main user interface * */
/* * * */
/* *************************************************** */
/* *************************************************** */
// ------------- The two classes visible by user are cFormula and cCoordinatorF ------
/** Abstraction of a mathematical formula; this is the object manipulated by the user. It
has all the algebraic operations required. This object is just an encapsulation of
a pointer to cImplemF.
*/
template <class TypeElem> class cFormula ;
/** Class for managing the "context", i.e. coordinating all the formula
and their derivative corresponding to a single use .
*/
template <class TypeElem> class cCoordinatorF;
// -------- Declaration all binary operators ----------------
// For each operator we have the 3 versions "Formula x Formula" ,
// "Number x Formula" and "Formula x Number" ; the two last are rather
// syntactic sugar (i.e. they make usage easier, but do not extend the library's power)
// Operator +
template <class TypeElem> cFormula <TypeElem>
operator +(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator +(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator +(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// Operator *
template <class TypeElem> cFormula <TypeElem>
operator *(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator *(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator *(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// Operator -
template <class TypeElem> cFormula <TypeElem>
operator -(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator -(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator -(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// Operator /
template <class TypeElem> cFormula <TypeElem>
operator /(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator /(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator /(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// pow
template <class TypeElem> cFormula <TypeElem>
pow (const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> pow (const TypeElem & aV1,const cFormula <TypeElem> & aF2);
/// This one defined in MMVII_FormDer_UnaryOp.h
template <class TypeElem> cFormula <TypeElem> pow (const cFormula <TypeElem> & aF1,const TypeElem & aV2);
template <class TypeElem> cFormula <TypeElem> pow (const cFormula <TypeElem> & aF1,const int & aV2);
// -------- integer low power ----------------
template <class TypeElem> cFormula <TypeElem> square(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> cube(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow4(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow5(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow6(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow7(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow8(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow9(const cFormula <TypeElem> & aF);
// --- other unary operator
template <class TypeElem> cFormula <TypeElem> exp(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> operator - (const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> log(const cFormula <TypeElem> & aF);
// ---- sometimes we need a templatized way to create constants
template <class T> cFormula<T> CreateCste(const T & aV,const cFormula<T> & aF);
/// --- powI : power with an integral exponent, dispatching to the specialized low powers
template <class Type> Type powI(const Type & aV,const int & aExp)
{
switch (aExp)
{
// case 0 : return Type(1.0);
case 0 : return CreateCste(1.0,aV);
case 1 : return aV;
case 2 : return square(aV);
case 3 : return cube(aV);
case 4 : return pow4(aV);
case 5 : return pow5(aV);
case 6 : return pow6(aV);
case 7 : return pow7(aV);
case 8 : return pow8(aV);
case 9 : return pow9(aV);
}
// else use the classical pow
return pow(aV,aExp);
}
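// e.g. powI(aF,3) resolves to cube(aF), while powI(aF,12) falls back to pow(aF,12).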
// -------- Declaration of Coordinator class ----------------
template <class TypeElem> class cCoordinatorF : public cCalculator<TypeElem>,public cMemCheck
{
public :
typedef cFormula <TypeElem> tFormula;
typedef std::vector<TypeElem> tOneRes;
// --------------------------- Constructors / Destructor -------------------
/// Constructor with explicit Ids for Unknowns/Observations. Used if we want to analyze the generated code
inline cCoordinatorF(const string &aName, int SzBuf, const std::vector<std::string> & aVecUK, const std::vector<std::string> & aVecObs);
/// Constructor with basic Ids (used if we don't generate code, or don't want a human to analyse it)
inline cCoordinatorF(const string &aName, int SzBuf,int aNbUnknown,int aNbObservation);
/// Destructor will free allocated formulas
virtual ~cCoordinatorF();
/// Copies are not allowed on this kind of object.
cCoordinatorF(const cCoordinatorF<TypeElem> &) = delete;
// --------------------------- Accessors to Atomic Formulas -------------------
const std::vector<tFormula>& VUk() const {return mVFormUnknowns;} ///< Unknowns
const std::vector<tFormula>& VObs() const {return mVFormObservations;} ///< Observations
// --------------------------- Manipulation -------------------
/// Set the formulas that will be used for computation
void SetCurFormulas(const std::vector<tFormula> &);
/** SetCurFormulas + all its derivative , order of storage will be
VF0 dVF0/dX0 dVF0/dX1 .... VF1 dVF1/dX0 ... */
void SetCurFormulasWithDerivative(const std::vector<tFormula> & aVF);
// ---------- Code generator ---------------
/** Generate code, class cName , file cName.h, cName.cpp. Return filename w/o ext, or "" if error */
std::string GenerateCode(const std::string &aFilePrefix="CodeGen_") const
{ return GenCodeShortExpr(aFilePrefix);
}
std::string GenerateCodeTemplate(const std::string &aFilePrefix="CodeGen_") const
{ return GenCodeShortExprTemplate(aFilePrefix);
}
std::string GenerateCodeForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const
{ return GenCodeShortExprForType(aTypeName,aFilePrefix);
}
std::string GenCodeShortExpr(const std::string &aFilePrefix="CodeGen_") const
{
return GenCodeCommon(aFilePrefix, "", true);
}
std::string GenCodeLonExpr(const std::string &aFilePrefix="CodeGen_") const
{
return GenCodeCommon(aFilePrefix, "", false);
}
std::string GenCodeShortExprTemplate(const std::string &aFilePrefix="CodeGen_") const
{
return GenCodeCommon(aFilePrefix, "template<>", true);
}
std::string GenCodeLonExprTemplate(const std::string &aFilePrefix="CodeGen_") const
{
return GenCodeCommon(aFilePrefix, "template<>", false);
}
std::string GenCodeShortExprForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const
{
return GenCodeCommon(aFilePrefix, aTypeName, true);
}
std::string GenCodeLonExprForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const
{
return GenCodeCommon(aFilePrefix, aTypeName, false);
}
// =========== Parametrisation of the generated code =========
/// The default value "SymbDer/SymbDer_Common.h" is not always adequate
void SetHeaderIncludeSymbDer(const std::string &aH) {mHeaderIncludeSymbDer= aH;}
void SetDirGenCode(const std::string &aDir) {mDirGenCode= aDir;}
void SetUseAllocByName(bool aUse) {mUseAllocByName= aUse;}
private : // END-USER
/* =================================================================================
ABOVE WAS THE REAL PUBLIC PART OF cCoordinatorF FOR USER OF LIBRARY. THE REST
IS PUBLIC FOR IMPLEMENTERS BUT NOT NEEDED BY USER
=====================================================================================*/
public :
// Result of several evaluation are stored in a buffer, Eigen vector are used
// as they implement efficiently arithmetical operation
// typedef Eigen::Array<TypeElem, 1, Eigen::Dynamic> tBuf;
typedef std::vector<TypeElem> tBuf;
// --------------------------- Acces to function from names, values -------------------
/// Indicate if the formula corresponding to a given string already exist
inline bool ExistFunc(const std::string & aName) const
{
return (mDicoFunc.find(aName) != mDicoFunc.end());
}
/// Func of given name, Error if don't exist
inline tFormula FuncOfName(const std::string & aName) const ;
/// Add a function (put it in dico), Error if already exist
inline void AddFormula(tFormula aPF)
{
if (ExistFunc(aPF->Name())) InternalError ("Multiple add of identical name :[" + aPF->Name() + "]",this->Name());
mDicoFunc[aPF->Name()] = aPF;
mVAllFormula.push_back(aPF);
aPF->TryReducAssoc();
}
/// Func of given constant, created if it doesn't exist
inline tFormula CsteOfVal(const TypeElem & aCste) ;
tFormula Cste0() const {return mCste0;} ///< Access to the constant 0
tFormula Cste1() const {return mCste1;} ///< Access to the constant 1
tFormula Cste2() const {return mCste2;} ///< Access to the constant 2
/// Tuning --- Print the stack of function as a tree
inline void ShowStackFunc() const;
/// Formulas used for computation
const std::vector<tFormula>& VReached() const {return mVReachedF;}
// Current (top) formulas
const std::vector<tFormula>& VCurrent() const {return mVCurF;}
size_t NbCurFonc() const {return mVAllFormula.size();}
private :
/// Called by cCalculator::PushNewEvals to Set Unknown/Observations
virtual void SetNewUks(const std::vector<TypeElem> &aVUks) override;
virtual void SetNewObs(const std::vector<TypeElem> &aVObs) override;
/** Make the evaluation of current functions on pushed values */
virtual void DoEval() override;
/// Used to automatically generate Ids for Unknowns/Observations, when we don't need to control them explicitly
static std::vector<std::string> MakeAutomId(const std::string & aPrefix,int aNb);
std::string GenCodeCommon(const string &aPrefix, string aTypeName, bool isShortExpr) const;
std::string TypeElemName() const;
size_t mNbCste; ///< Number of constants
std::vector<tFormula> mVFormUnknowns; ///< Vector of all Unknowns
std::vector<tFormula> mVFormObservations; ///< Vector of all Observations
std::map<std::string,tFormula> mDicoFunc; ///< Map Name => Func
std::vector<tFormula> mVAllFormula; ///< Vector of all Funcs, allows parsing them in creation order
std::map<TypeElem,tFormula> mDicoCste; ///< Map Value => constant Func
tFormula mCste0; ///< Constant function 0
tFormula mCste1; ///< Constant function 1
tFormula mCste2; ///< Constant function 2
std::vector<tFormula> mVCurF; ///< Currently evaluated formulas
std::vector<tFormula> mVReachedF; ///< Formulas "reachable" i.e. necessary to compute mVCurF
std::string mHeaderIncludeSymbDer; ///< Compilation environment may want to change it
std::string mDirGenCode; ///< Where to put the generated code (fixed folder ?)
bool mUseAllocByName; ///< Do we generate code for allocation from name (with cName2Calc)
};
/* **************************************************
* *
* Pre-Declaration of all classes *
* Not required by compilation *
* (Except for cImplemF )but I like to have *
* a quick view of all existing classes *
* *
* **************************************************/
/** "Mother" Interface class of all classes implementing the service ,
abstract class with pure virtual method
*/
template <class TypeElem> class cImplemF ;
// --------------- "Atomic" function : Unknown, constant, observation-----------------
template <class TypeElem> class cAtomicF ; ///< Mother Class of all atomic formulas
/// "Observations" corresponding to user constant (change for each evaluation)
template <class TypeElem> class cObservationF ;
/// "Constant" function
template <class TypeElem> class cConstantF ;
/// "Unknown" for representing coordinates function X0,X1,X2 ....
template <class TypeElem> class cUnknownF;
// ----------------------------- Unary operator ------------------------------------
template <class TypeElem> class cUnaryF ; ///< Mother Class of all unary operator
template <class TypeElem> class cSquareF ; ///< Class for square operator
template <class TypeElem> class cExpF ; ///< Class for exponential operator
template <class TypeElem> class cMin1F ; ///< Class for Unary Minus
template <class TypeElem> class cLogF ; ///< Class for natural (Napierian) log
// -------------------------------- Binary operator -------------------------------------
template <class TypeElem> class cBinaryF ; ///< Mother class of binary operators
template <class TypeElem> class cSumF ; ///< Class for sum of 2 functions
template <class TypeElem> class cMulF ; ///< Class for multiplication of 2 functions
template <class TypeElem> class cSubF ; ///< Class for subtraction of 2 functions
template <class TypeElem> class cDivF ; ///< Class for division of 2 functions
template <class TypeElem> class cPowF ; ///< Class for power of 2 functions
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * Definition of all classes * */
/* * * */
/* *************************************************** */
/* *************************************************** */
// ------------------- 2 "Main" Classes -------------------------
// cFormula / cImplemF
// ----------------------------------------------------------------
template <class TypeElem> class cImplemF : public cMemCheck
{
public :
// See eigen documentation, this macro is mandatory for alignment reason
// EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EIGEN_ALLIGNMENT_IN_MMVII
typedef TypeElem tElem;
typedef cCoordinatorF<TypeElem> tCoordF;
typedef typename tCoordF::tBuf tBuf;
typedef typename tCoordF::tFormula tFormula;
//----------- For derivation and reduction--------------
virtual bool IsCste(const TypeElem &) const {return false;} ///< To redefine in constant func; used for simplification in "/ * + -"
virtual bool IsDistribInt() const {return false;} ///< To redefine in *,/ for distributivity
virtual tFormula Derivate(int aK) const = 0; ///< Compute the formula of it's derivative to Kth unknown
/** In this function we try to make reductions using associativity (and maybe others);
as we want to do it only on maximal chains of + (or *), this has to be run by the father of
the chain
*/
void TryReducAssoc();
virtual cImplemF<TypeElem> * ReducAssoc() {return this;}
virtual bool IsMult() const {return false;}
virtual bool IsSum() const {return false;}
bool ReducAssocTried() const {return mReducAssocTried;}
virtual cFormula<TypeElem> VOper2(const tFormula &,const tFormula &) const; ///< Used in distributive reduction to recall the binary operator if suitable
// -------------- For Computation -------------------------
/// Method that will compute data inside mBuf
virtual void ComputeBuf(int aK0,int aK1) =0;
/// Return "Sub"-formula referenced
virtual std::vector<tFormula> Ref() const =0;
// ---------- Accessors ---------------
const std::string & Name() const {return mName;} ///< Standard accessor
tCoordF * CoordF() const {return mCoordF;} ///< Standard accessor
int NumGlob() const {return mNumGlob;} ///< Standard accessor
// ---------- Acces to Buf data ---------------
void SetBuf(size_t anIndex,const TypeElem & aVal) {mBuf.at(anIndex) = aVal;}
const TypeElem & GetBuf(size_t anIndex) {return mBuf.at(anIndex);}
TypeElem * DataBuf() {return mDataBuf;}
// ---------- Reached Flag ---------------
bool Reached() const {return mReached;} ///< Standard accessor
void SetReached(bool IsReached) {mReached = IsReached;} ///< Fix Reached
/// Compute in the reference graphe and put formula explored in VReached
void CalcRecursiveDepth(std::vector<tFormula> & VReached) ;
int Depth() const {return mDepth;} ///< Standard accessor
void SetDepth(int aDepth) {mDepth = aDepth;} ///< Fix Depth
// ---------- Code gen -----------------------
virtual bool isAtomic() const { return false;}
virtual std::string GenCodeFormName() const {return NameGlob();} // Name of formula, referenced value for Atomic
virtual std::string GenCodeShortExpr() const = 0; // N-Addresses code generation
virtual std::string GenCodeDef() const = 0; // Formula definition generation
virtual std::string GenCodeRef() const; // Formula reference generation
int UsedCnt() const {return mUsedCnt;} ///< Standard accessor
// ---------- Tuning / Debugging / Analysing ---------------
/// Used to print constant from generic formula
virtual const TypeElem * ValCste() const {return nullptr;}
/// Infixed "Pretty" Print . For tuning and checking (i.e correction of reduction, derivative, rewrite ...)
virtual std::string InfixPPrint() const =0;
/// Number of references that would occur without reduction of identical formulas (to test performance in the paper)
int RecursiveRec() const;
// Everywhere a reference name is needed
std::string NameGlob() const { return "F" + std::to_string(NumGlob());}
/// Access at global level is for reducing; it is also used for implementation in Unary & Binary
virtual const std::string & NameOperator() const = 0;
// -------------------- Destructor / Constructor --------------------------
virtual ~cImplemF () {} ///< Add a virtual ~X() when we have virtual methods, who knows ...
protected :
inline cImplemF (tCoordF * aCoordF,const std::string & aName) :
mCoordF (aCoordF),
mBuf (mCoordF->SzBuf(),TypeElem(0.0)),
mDataBuf (mBuf.data()),
mName (aName),
mNumGlob (mCoordF->NbCurFonc()),
mReached (false),
mDepth (-1),
mUsedCnt (0),
mReducAssocTried (false)
{
}
tCoordF * mCoordF; ///< Coordinator that manages all the cooperating functions
tBuf mBuf; ///< Buf to store values
TypeElem * mDataBuf; ///< Raw pointer
const std::string mName; ///< string representation of the formula, e.g. : C2, X1, V0 , square F3, F18/F3 ...
int mNumGlob; ///< Global number (!= Num in class)
bool mReached; ///< Flag to know if a formula is useful for computing the current formulas
int mDepth; ///< Used for topological sort
private :
cImplemF (const cImplemF<TypeElem> &) = delete; ///< No Copy
unsigned mUsedCnt;
bool mReducAssocTried;
};
template <class TypeElem> class cFormula
{
public :
typedef cCoordinatorF<TypeElem> tCoordF;
typedef cImplemF<TypeElem> tImplemF;
typedef typename tCoordF::tFormula tFormula;
// -------------------- constructor -------------------
/// Construct from a pointer, standard
cFormula (tImplemF * aRawPtr) :
mPtr (aRawPtr)
{
}
/// Default constructor, required by some code (vector ?)
cFormula ():
cFormula <TypeElem> (nullptr)
{
}
// --------------- operator on pointer ---------------------
// UNUSED 4 NOW tImplemF & operator*() const {return *mPtr;} ///< Standard behaviour of a pointer
tImplemF * operator->() const {return mPtr;} ///< Standard behaviour of a pointer
tImplemF * RawPtr() const {return mPtr;} ///< Explicit acces
// DO NOT WORK const std::unique_ptr<tImplemF> operator->() const {return std::unique_ptr<mPtr>;}
bool IsNull() const {return mPtr==nullptr;} ///< Safer than giving access to the raw pointer
// --------------- Naming ---------------------
/// Generate the unique identifier of a binary expression
std::string NameFormulaBin(const std::string & aNameOper,const tFormula & aF2) const
{
return (*this)->NameGlob() + aNameOper + aF2->NameGlob();
}
/// Generate the unique identifier of a unary expression
std::string NameFormulaUn(const std::string & aNameOper) const
{
return aNameOper + " " + (*this)->NameGlob();
}
/// To allow destruction without giving access to raw pointer
void FreeMem() {delete mPtr; mPtr=nullptr;}
private :
tImplemF* mPtr; ///< Faster than a shared_ptr, and deallocation is easy as objects are controlled by the context
};
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * ATOMIC FORMULA * */
/* * * */
/* *************************************************** */
/* *************************************************** */
/* ----------------------------------------------------------
Class for atomic formula
MOTHER CLASS : cAtomicF
DERIVED : cUnknownF / cObservationF / cConstantF
----------------------------------------------------------------*/
template <class TypeElem> class cAtomicF : public cImplemF<TypeElem>
{
public :
typedef cImplemF<TypeElem> tImplemF;
typedef typename tImplemF::tCoordF tCoordF;
typedef typename tCoordF::tFormula tFormula;
/// Should always work
std::string InfixPPrint() const override {return tImplemF::Name();}
/// Rule deriv=0 ; works by default (constants and observations)
tFormula Derivate(int aK) const override {return tImplemF::mCoordF->Cste0();}
/// Generally nothing to do in atomic formulas, their buffers have been filled with adequate values
void ComputeBuf(int aK0,int aK1) override { }
std::vector<tFormula> Ref() const override{return std::vector<tFormula>();}
protected :
bool isAtomic() const override { return true;}
std::string GenCodeFormName() const override { return this->Name();}
std::string GenCodeShortExpr() const override { return this->GenCodeFormName();}
std::string GenCodeRef() const override { return this->GenCodeFormName();}
std::string GenCodeDef() const override { return mCodeValue;}
inline cAtomicF(tCoordF * aCoordF,const std::string& aName) :
tImplemF (aCoordF,aName)
{ }
std::string mCodeValue;
};
template <class TypeElem> class cUnknownF : public cAtomicF<TypeElem>
{
public :
typedef cAtomicF<TypeElem> tAtom;
typedef typename tAtom::tImplemF tImplemF;
typedef typename tImplemF::tCoordF tCoordF;
typedef typename tCoordF::tFormula tFormula;
const std::string & NameOperator() const override {static std::string s("UK"); return s;}
std::string InfixPPrint() const override {return tImplemF::Name();}
/// rule : dXi/dXj = delta(i,j)
tFormula Derivate(int aK) const override
{
return (aK==mNumUnk) ? tImplemF::mCoordF->Cste1() : tImplemF::mCoordF->Cste0();
}
friend tCoordF;
private :
inline cUnknownF(tCoordF * aCoordF,const std::string& aName,int aNum) :
tAtom (aCoordF,aName),
mNumUnk (aNum)
{
this->mCodeValue = "this->mVUk[aK][" + std::to_string(mNumUnk) + "]";
}
int mNumUnk; ///< Number of the Unknown; like : 0 for X0, 1 for X1 ...
};
template <class TypeElem> class cObservationF : public cAtomicF<TypeElem>
{
public :
typedef cAtomicF<TypeElem> tAtom;
typedef typename tAtom::tImplemF tImplemF;
typedef typename tImplemF::tCoordF tCoordF;
typedef typename tCoordF::tFormula tFormula;
friend tCoordF;
const std::string & NameOperator() const override {static std::string s("Obs"); return s;}
private :
inline cObservationF(tCoordF * aCoordF,const std::string & aName,int aNum) :
tAtom (aCoordF,aName),
mNum (aNum)
{
this->mCodeValue = "this->mVObs[aK][" + std::to_string(mNum) + "]";
}
int mNum; ///< Number of the Observation; like : 0 for V0, 1 for V1 ...
};
template <class TypeElem> class cConstantF : public cAtomicF<TypeElem>
{
public :
typedef cAtomicF<TypeElem> tAtom;
typedef typename tAtom::tImplemF tImplemF;
typedef typename tImplemF::tCoordF tCoordF;
typedef typename tCoordF::tFormula tFormula;
typedef typename tCoordF::tBuf tBuf;
friend tCoordF;
bool IsCste(const TypeElem &K) const override {return mVal==K;} ///< Here we know if we are a constant of value K
const TypeElem * ValCste() const override {return &mVal;}
const std::string & NameOperator() const override {static std::string s("Cste"); return s;}
protected :
inline cConstantF(tCoordF * aCoordF,const std::string & aName,int aNum,const TypeElem& aVal) :
tAtom (aCoordF,aName),
mNum (aNum),
mVal (aVal)
{
for (auto & aV : tImplemF::mBuf) aV = aVal; // Initialize buf with const val
std::stringstream ss;
// Precision that ensures that Num0 -> ASCII -> Num1 => Num1 == Num0
// May cause some odd but correct value for non exactly representable numbers
ss << std::setprecision(std::numeric_limits<decltype(mVal)>::max_digits10) << mVal;
this->mCodeValue = ss.str();
}
std::string GenCodeFormName() const override { return this->mCodeValue;}
int mNum;
const TypeElem mVal;
};
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * cFormula / cImplemF / cCoordinatorF * */
/* * External Definition of methods * */
/* * * */
/* *************************************************** */
/* *************************************************** */
/* ---------------------- */
/* cFormula */
/* ---------------------- */
template <class T> cFormula<T> CreateCste(const T & aV,const cFormula<T> & aF)
{
return aF->CoordF()->CsteOfVal(aV);
}
/* ---------------------- */
/* cImplemF */
/* ---------------------- */
template <class TypeElem> int cImplemF<TypeElem>::RecursiveRec() const
{
int aRes = 1;
for (const auto & aF : Ref())
{
aRes += aF->RecursiveRec();
}
return aRes;
}
template <class TypeElem> void cImplemF<TypeElem>::CalcRecursiveDepth(std::vector<tFormula> & aVReached)
{
if (mDepth != -1) {
mUsedCnt++;
return; // if we were already here , nothing to do
}
mUsedCnt = 1;
for (const auto & aF : Ref())
{
aF->CalcRecursiveDepth(aVReached); // parse sub formula
mDepth = std::max(mDepth,aF->mDepth); // Memo max depth
}
mDepth++; // my depth is 1 + max of depth of my referenced formulas
aVReached.push_back(tFormula(this));
}
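// Example: for F = X0*X0 + V0, the atoms X0 and V0 get depth 0, X0*X0 depth 1
// and the sum depth 2, so sorting mVReachedF by depth yields a valid evaluation order.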
template <class TypeElem> void cImplemF<TypeElem>::TryReducAssoc()
{
for (auto & aF : Ref())
{
// F will not belong to the terminal command that will have to be reparsed
// If we are in the config (A+B) + .. maybe the chain will grow later
if (aF->NameOperator() != NameOperator())
{
aF = aF->ReducAssoc();
}
aF->mReducAssocTried = true;
}
}
template <class TypeElem> cFormula<TypeElem> cImplemF<TypeElem>::VOper2(const tFormula & aF1,const tFormula &) const
{
InternalError("Incorrect virtual binary operation",this->mCoordF->Name());
return aF1;
}
template <class TypeElem>
std::string cImplemF<TypeElem>::GenCodeRef() const
{
if (UsedCnt() == 1) {
return GenCodeDef();
} else {
return GenCodeFormName();
}
}
/* ---------------------- */
/* cCoordinatorF */
/* ---------------------- */
template <class TypeElem>
std::vector<std::string> cCoordinatorF<TypeElem>::MakeAutomId(const std::string & aPrefix,int aNb)
{
std::vector<std::string> aRes;
for (int aK=0 ; aK<aNb ; aK++)
aRes.push_back(aPrefix+ std::to_string(aK));
return aRes;
}
template <class TypeElem>
cCoordinatorF<TypeElem>::cCoordinatorF
(
const std::string & aName,
int aSzBuf,
const std::vector<std::string> & aVNameUK,
const std::vector<std::string> & aVNameObs
) :
cCalculator<TypeElem>(aName,aSzBuf,aVNameUK.size(),aVNameObs.size()),
mNbCste (0),
mCste0 (CsteOfVal(0.0)),
mCste1 (CsteOfVal(1.0)),
mCste2 (CsteOfVal(2.0)),
mHeaderIncludeSymbDer ("SymbDer/SymbDer_Common.h"),
mDirGenCode (""),
mUseAllocByName (false) // For strict compatibility with previous Jo's code
{
// Generate all the function corresponding to unknown
for (size_t aNumUK=0 ; aNumUK<this->mNbUK ; aNumUK++)
{
tFormula aFuncUK(new cUnknownF<TypeElem>(this,aVNameUK[aNumUK],aNumUK)); // Create it
mVFormUnknowns.push_back(aFuncUK); // Push it in vector of coordinat func
AddFormula(aFuncUK); // Add to all func
}
// Generate all the function corresponding to observations
for (size_t aNumObs=0 ; aNumObs<this->mNbObs ; aNumObs++)
{
tFormula aFuncObs(new cObservationF<TypeElem>(this,aVNameObs[aNumObs],aNumObs)); // Create it
mVFormObservations.push_back(aFuncObs); // Push it in vector of coordinat func
AddFormula(aFuncObs); // Add to all func
}
}
template <class TypeElem>
cCoordinatorF<TypeElem>::cCoordinatorF(const string &aName, int aSzBuf, int aNbUK, int aNbObs) :
cCoordinatorF<TypeElem>(aName,aSzBuf,MakeAutomId("X",aNbUK),MakeAutomId("V",aNbObs))
{
}
template <class TypeElem>
cCoordinatorF<TypeElem>::~cCoordinatorF()
{
for (auto & aForm : mVAllFormula)
{
aForm.FreeMem();
}
}
template <class TypeElem>
cFormula<TypeElem> cCoordinatorF<TypeElem>::CsteOfVal(const TypeElem & aCste)
{
tFormula & aRef = mDicoCste[aCste];
if (aRef.IsNull()) // If it did not exist, the map now contains a default element
{
// The prefix is used to make constants come first in alphabetical order, used for reduction ?
aRef=tFormula(new cConstantF<TypeElem>(this,"_C"+std::to_string(mNbCste),mNbCste,aCste));
mNbCste++;
AddFormula(aRef);
}
return aRef;
}
template <class TypeElem>
cFormula <TypeElem> cCoordinatorF<TypeElem>::FuncOfName(const std::string & aName) const
{
const auto & anIt = mDicoFunc.find(aName);
if (anIt == mDicoFunc.end()) InternalError ("Tried to access non-existing name :[" + aName + "]",this->Name());
return anIt->second;
}
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetNewUks(const std::vector<TypeElem> & aVUks)
{
for (size_t aK=0 ; aK<aVUks.size() ; aK++) // Init Vals of formulas buffer
{
mVFormUnknowns[aK]->SetBuf(this->mNbInBuf,aVUks[aK]);
}
}
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetNewObs(const std::vector<TypeElem> & aVObs)
{
for (size_t aK=0 ; aK<aVObs.size() ; aK++) // Init Vals of formulas buffer
{
mVFormObservations[aK]->SetBuf(this->mNbInBuf,aVObs[aK]);
}
}
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetCurFormulasWithDerivative(const std::vector<tFormula> & aVF)
{
std::vector<tFormula> aVWDer;
for (const auto & aF : aVF)
{
aVWDer.push_back(aF);
for (size_t aUK=0 ; aUK<this->mNbUK ; aUK++)
{
aVWDer.push_back(aF->Derivate(aUK));
}
}
SetCurFormulas(aVWDer);
this->mWithDer = true;
this->mSzInterval = 1+this->mNbUK;
this->mNbElem = aVF.size();
}
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetCurFormulas(const std::vector<tFormula> & aVF0)
{
std::vector<tFormula> aVF;
for(auto aF : aVF0)
{
if (! aF->ReducAssocTried())
{
aF = tFormula(aF->ReducAssoc());
// std::cout << "GGGGGGGG " << aF->Name() << " \n";
}
aVF.push_back(aF);
}
this->mWithDer=false;
this->mSzInterval = 1;
this->mNbElem = aVF0.size();
mVCurF = aVF;
// Erase previous
for (auto & aF : mVReachedF)
aF->SetDepth(-1);
mVReachedF.clear();
// Compute depth for topological sort
for (auto & aF : mVCurF)
{
aF->CalcRecursiveDepth(mVReachedF);
}
// Use depth to have topological sort
// In fact this sort is probably not necessary: the initial reaching order
// should work; in any case, no damage is done ..
std::sort
(
mVReachedF.begin(),
mVReachedF.end(),
[](const tFormula & aF1,const tFormula &aF2) {return aF1->Depth() < aF2->Depth();}
);
// Make Buf of Res to have right size
for (auto & aLine : this->mBufLineRes)
{
aLine.resize(mVCurF.size());
}
}
template <class TypeElem>
void cCoordinatorF<TypeElem>::DoEval()
{
// Do the real hard work: compute the data; the dependency ordering should make it coherent
#ifdef _OPENMP
#pragma omp parallel
{
size_t thread_num = omp_get_thread_num();
size_t num_threads = omp_get_num_threads();
size_t start = thread_num * this->mNbInBuf / num_threads;
size_t end = (thread_num + 1) * this->mNbInBuf / num_threads;
if (end>start)
{
for (auto & aF : mVReachedF)
{
aF->ComputeBuf(start,end);
}
}
}
#else
for (auto & aF : mVReachedF)
{
aF->ComputeBuf(0,this->mNbInBuf);
}
#endif
for (size_t aKLine=0 ; aKLine<this->mNbInBuf ; aKLine++)
{
std::vector<TypeElem> & aLine = this->mBufLineRes[aKLine];
for (size_t aKFunc=0 ; aKFunc< mVCurF.size() ; aKFunc++)
aLine[aKFunc] = mVCurF[aKFunc]->GetBuf(aKLine);
}
}
template <class TypeElem>
void cCoordinatorF<TypeElem>::ShowStackFunc() const
{
for (const auto & aForm : mVAllFormula)
{
if (aForm->Depth()==-1)
std::cout << "---" ;
else
std::cout << "-" << aForm->Depth() << "-";
std::cout << aForm->UsedCnt() << "- ";
std::cout << aForm->NameGlob() << " => " << aForm->Name();
const TypeElem * aPV = aForm->ValCste();
if (aPV)
std::cout << " ; Val=" << *aPV;
std::cout << "\n";
}
std::cout << "REACHED ";
for (const auto & aForm : mVReachedF)
{
std::cout << aForm->NumGlob() << " ";
}
std::cout << "\n";
std::cout << "CUR ";
for (const auto & aForm : mVCurF)
{
std::cout << aForm->NumGlob() << " ";
}
std::cout << "\n";
}
template <class TypeElem>
std::string cCoordinatorF<TypeElem>::GenCodeCommon(const std::string& aPrefix, std::string aTypeName, bool isShortExpr) const
{
std::string aName = this->Name();
if (aName.size() == 0)
UserSError("Formula name is empty.",this->Name());
for (auto &c : aName) {
if (!std::isalnum(c) && c != '_')
UserSError("Formula name is not a valid C++ identifier: '_,a..z,A..Z,0..9' only.",this->Name());
}
std::string aClassName = "c" + aName;
if (aTypeName.size()==0)
aTypeName = this->TypeElemName();
bool isTemplated = aTypeName=="template<>";
if (isTemplated)
aTypeName = "TypeElem";
std::string aVectorName = "std::vector<" + aTypeName + ">";
if (! isShortExpr)
aClassName = aClassName + "LongExpr";
std::string aParentClass = "cCalculator<" + aTypeName + ">";
std::string aFileName = aPrefix + aClassName;
std::ofstream aOs(mDirGenCode + aFileName + ".h");
if (!aOs)
return "";
aOs << "#ifdef _OPENMP\n"
"#include <omp.h>\n"
"#endif\n"
"#include \"" << mHeaderIncludeSymbDer << "\"\n"
"\n"
"namespace NS_SymbolicDerivative {\n\n";
if (isTemplated) {
aOs << "template<typename TypeElem>\n";
}
aOs << "class " << aClassName << " : public " << aParentClass << "\n"
"{\n"
"public:\n"
" typedef " << aParentClass << " Super;\n"
" " << aClassName << "(size_t aSzBuf) : \n"
" Super(\"" << aName << "\", aSzBuf,"
<< this->mNbUK << ","
<< this->mNbObs << ","
<< this->mWithDer << ","
<< this->mSzInterval << "),\n"
" mVUk(aSzBuf),mVObs(aSzBuf)\n"
" {\n"
" this->mNbElem = " << this->mNbElem << ";\n"
" for (auto& line : this->mBufLineRes)\n"
" line.resize(" << mVCurF.size() << ");\n"
" for (auto& aUk : this->mVUk)\n"
" aUk.resize(this->NbUk());\n"
" for (auto& aObs : this->mVObs)\n"
" aObs.resize(this->NbObs());\n"
" }\n"
" static std::string FormulaName() { return \"" << aName << "\";}\n"
"protected:\n"
" virtual void SetNewUks(const " << aVectorName << " & aVUks) override\n"
" {\n"
" for (size_t i=0; i<this->NbUk(); i++)\n"
" this->mVUk[this->mNbInBuf][i] = aVUks[i];\n"
" }\n"
" virtual void SetNewObs(const " << aVectorName << " & aVObs) override\n"
" {\n"
" for (size_t i=0; i<this->NbObs(); i++)\n"
" this->mVObs[this->mNbInBuf][i] = aVObs[i];\n"
" }\n"
" virtual void DoEval() override;\n"
" std::vector<" << aVectorName << "> mVUk;\n"
" std::vector<" << aVectorName << "> mVObs;\n"
"};\n"
"\n";
if (! isTemplated) {
aOs << "} // namespace NS_SymbolicDerivative\n";
aOs = std::ofstream(mDirGenCode+aFileName + ".cpp");
if (!aOs)
return "";
aOs << "#include \"" + aFileName + ".h\"\n"
"\n"
"namespace NS_SymbolicDerivative {\n"
"\n"
"void " << aClassName << "::DoEval()\n";
} else {
aOs << "\n"
"template<typename TypeElem>\n"
"void " << aClassName << "<TypeElem>::DoEval()\n";
}
aOs << "{\n"
"#ifdef _OPENMP\n"
"#pragma omp parallel for\n"
"#endif\n"
" for (size_t aK=0; aK < this->mNbInBuf; aK++) {\n"
"// Declare local vars in loop to make them per thread\n";
for (auto & aForm : mVFormUnknowns)
aOs << " " << aTypeName << " &" << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n";
for (const auto & aForm : mVFormObservations)
aOs << " " << aTypeName << " &" << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n";
if (isShortExpr) {
for (const auto & aForm : mVReachedF) {
if (!aForm->isAtomic())
aOs << " " << aTypeName << " " << aForm->GenCodeFormName() << " = " << aForm->GenCodeShortExpr() << ";\n";
}
for (size_t i=0; i<mVCurF.size(); i++)
aOs << " this->mBufLineRes[aK][" << i << "] = " << mVCurF[i]->GenCodeFormName() << ";\n";
} else {
for (const auto & aForm : mVReachedF) {
if (aForm->UsedCnt() != 1 && !aForm->isAtomic()) {
aOs << " " << aTypeName << " " << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n";
}
}
for (size_t i=0; i<mVCurF.size(); i++)
aOs << " this->mBufLineRes[aK][" << i << "] = " << mVCurF[i]->GenCodeRef() << ";\n";
}
aOs << " }\n"
"}\n\n";
if (mUseAllocByName)
{
aOs << "cCalculator<" << aTypeName << "> * Alloc_" << aName << "(int aSzBuf)\n"
<< "{\n"
<< " return new c" << aName << "(aSzBuf);\n"
<< "}\n\n"
<< "cName2Calc<" << aTypeName << "> TheNameAlloc_" << aName <<"(\""<< aName <<"\",Alloc_" << aName<< ");\n\n";
}
aOs << "} // namespace NS_SymbolicDerivative\n";
return aFileName;
}
template<>
inline std::string cCoordinatorF<double>::TypeElemName() const {return "double";}
template<>
inline std::string cCoordinatorF<float>::TypeElemName() const {return "float";}
template<typename T>
struct Detect_if_TypeElemName_is_defined : std::false_type
{ };
template<class TypeElem>
inline std::string cCoordinatorF<TypeElem>::TypeElemName() const
{
static_assert( Detect_if_TypeElemName_is_defined<TypeElem>::value , "** You must define cCoordinatorF::TypeElemName() for your type **");
return "";
}
} // NS_Symbolic_Derivative
#include "SymbDer_UnaryOp.h"
#include "SymbDer_BinaryOp.h"
/*
https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
http://en.wikipedia.org/wiki/Automatic_differentiation
https://git.irc.umbc.edu/photorig/openMVG/blob/260584fda68dce095e279362efd24a2d7d7cf5d9/src/third_party/ceres-solver/include/ceres/jet.h
https://mc-stan.org/
http://www.met.reading.ac.uk/clouds/adept/array_features.html
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.89.7749&rep=rep1&type=pdf
http://www.autodiff.org/
*/
#endif // _SymbolicDerivatives_H_
nodal_residualbased_elimination_builder_and_solver.h
// | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER )
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual).
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system, ordered in reverse order with respect to the DofSet.
* Imposition of the Dirichlet conditions is naturally dealt with, as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolver);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
typedef WeakPointerVector<Node<3> > NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
NodalResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >(pNewLinearSystemSolver)
{
// KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolver") << "Using the standard builder and solver " << std::endl;
}
/** Destructor.
*/
~NodalResidualBasedEliminationBuilderAndSolver() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void BuildNodally(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
/* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
const double FourThirds = 4.0 / 3.0;
const double nTwoThirds = -2.0 / 3.0;
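// 4/3 and -2/3 arise from the deviatoric part of the constitutive tensor in
// Voigt form (sigma' = 2*mu*(d - tr(d)/3 * I)); they weight the LHS blocks below.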
double secondLame=0;
double volumetricCoeff=0;
double density=0;
double theta=0.5;
double accX=0;
double accY=0;
double accZ=0;
double sigmaXX=0;
double sigmaYY=0;
double sigmaZZ=0;
double sigmaXY=0;
double sigmaXZ=0;
double sigmaYZ=0;
double pressure=0;
double dNdXi=0;
double dNdYi=0;
double dNdZi=0;
double dNdXj=0;
double dNdYj=0;
double dNdZj=0;
unsigned int firstRow=0;
unsigned int firstCol=0;
/* #pragma omp parallel */
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size()+1;
if(neighSize>1){
const double nodalVolume=itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
LHS_Contribution= ZeroMatrix(localSize,localSize);
RHS_Contribution= ZeroVector(localSize);
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
density=itNode->FastGetSolutionStepValue(DENSITY);
if(itNode->Is(SOLID)){
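// Solid branch (sketch of the algebra): secondLame = dt*mu with the shear
// modulus mu = E/(2*(1+nu)); volumetricCoeff = dt*lambda + 2*dt*mu/3 = dt*K,
// i.e. the bulk modulus integrated over the time step.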
secondLame = timeInterval*itNode->FastGetSolutionStepValue(YOUNG_MODULUS)/(1.0+itNode->FastGetSolutionStepValue(POISSON_RATIO))*0.5;
volumetricCoeff = timeInterval*itNode->FastGetSolutionStepValue(POISSON_RATIO)*itNode->FastGetSolutionStepValue(YOUNG_MODULUS)/
((1.0+itNode->FastGetSolutionStepValue(POISSON_RATIO))*(1.0-2.0*itNode->FastGetSolutionStepValue(POISSON_RATIO))) + 2.0*secondLame/3.0;
}
else if(itNode->Is(FLUID)){
secondLame = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
volumetricCoeff = timeInterval*itNode->FastGetSolutionStepValue(BULK_MODULUS);
/* volumetricCoeff*=0.000025;//dam break fine */
/* volumetricCoeff*=0.04;//sloshing coarse */
/* volumetricCoeff*=0.0055;//sloshing coarse */
double bulkReduction=density*nodalVolume/(timeInterval*volumetricCoeff);
volumetricCoeff*=bulkReduction;
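// Note: after this scaling volumetricCoeff equals density*nodalVolume/timeInterval,
// independent of BULK_MODULUS, i.e. a scaled (artificial) compressibility term.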
/* volumetricCoeff*=0.000042;//sloshing fine */
/* volumetricCoeff*=0.0000005; */
/* std::cout<<"bulkReduction "<<bulkReduction<<std::endl; */
/* volumetricCoeff*=0.0002; */
}
firstRow=0;
firstCol=0;
/* const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); */
if(dimension==2){
//////////////////////////// LHS TERMS //////////////////////////////
LHS_Contribution(0,0)+=nodalVolume*density*2.0/timeInterval;
LHS_Contribution(1,1)+=nodalVolume*density*2.0/timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
accX=2.0*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION_X,0);
accY=2.0*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION_Y,0);
RHS_Contribution[0]+=-nodalVolume*density*accX;
RHS_Contribution[1]+=-nodalVolume*density*accY;
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3 >& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0];
RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1];
//-------- INTERNAL FORCES TERM -------//
sigmaXX=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[0];
sigmaYY=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[1];
sigmaXY=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[2];
if(itNode->IsNot(SOLID)){
pressure=itNode->FastGetSolutionStepValue(PRESSURE,0)*theta+itNode->FastGetSolutionStepValue(PRESSURE,1)*(1-theta);
sigmaXX=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
sigmaYY=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
}
const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0]=itNode->GetDof(VELOCITY_X,xpos).EquationId();
EquationId[1]=itNode->GetDof(VELOCITY_Y,xpos+1).EquationId();
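// Assemble the 2x2 velocity blocks for each pair (i,j) of the nodal stencil:
// LHS += V * Bj^T D Bi, with D built from (4/3*mu + kappa) on the diagonal,
// (-2/3*mu + kappa) on the volumetric coupling and mu on the shear terms
// (mu = secondLame, kappa = volumetricCoeff), all weighted by the theta factor.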
for (unsigned int i = 0; i< neighSize; i++)
{
dNdXi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+1];
RHS_Contribution[firstCol] += - nodalVolume * (dNdXi*sigmaXX + dNdYi*sigmaXY);
RHS_Contribution[firstCol+1]+= - nodalVolume * (dNdYi*sigmaYY + dNdXi*sigmaXY);
for (unsigned int j = 0; j< neighSize; j++)
{
dNdXj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+1];
LHS_Contribution(firstRow,firstCol) += nodalVolume * ( (FourThirds * secondLame + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * secondLame )*theta;
LHS_Contribution(firstRow,firstCol+1) += nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * secondLame )*theta;
LHS_Contribution(firstRow+1,firstCol) += nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * secondLame )*theta;
LHS_Contribution(firstRow+1,firstCol+1)+= nodalVolume * ( (FourThirds * secondLame + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * secondLame )*theta;
firstRow+=2;
}
firstRow=0;
firstCol+=2;
if(i<neighb_nodes.size()){
EquationId[firstCol]=neighb_nodes[i].GetDof(VELOCITY_X,xpos).EquationId();
EquationId[firstCol+1]=neighb_nodes[i].GetDof(VELOCITY_Y,xpos+1).EquationId();
}
}
/* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */
}else if(dimension==3){
//////////////////////////// LHS TERMS //////////////////////////////
LHS_Contribution(0,0)+=nodalVolume*density*2.0/timeInterval;
LHS_Contribution(1,1)+=nodalVolume*density*2.0/timeInterval;
LHS_Contribution(2,2)+=nodalVolume*density*2.0/timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
accX=2.0*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION_X,0);
accY=2.0*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION_Y,0);
accZ=2.0*(itNode->FastGetSolutionStepValue(VELOCITY_Z,0)-itNode->FastGetSolutionStepValue(VELOCITY_Z,1))/timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION_Z,0);
RHS_Contribution[0]+=-nodalVolume*density*accX;
RHS_Contribution[1]+=-nodalVolume*density*accY;
RHS_Contribution[2]+=-nodalVolume*density*accZ;
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3 >& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0];
RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1];
RHS_Contribution[2]+=nodalVolume*density*VolumeAcceleration[2];
//-------- INTERNAL FORCES TERM -------//
sigmaXX=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[0];
sigmaYY=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[1];
sigmaZZ=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[2];
sigmaXY=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[3];
sigmaXZ=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[4];
sigmaYZ=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS)[5];
if(itNode->IsNot(SOLID)){
pressure=itNode->FastGetSolutionStepValue(PRESSURE,0)*theta+itNode->FastGetSolutionStepValue(PRESSURE,1)*(1-theta);
sigmaXX=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
sigmaYY=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
sigmaZZ=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure;
}
const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0]=itNode->GetDof(VELOCITY_X,xpos).EquationId();
EquationId[1]=itNode->GetDof(VELOCITY_Y,xpos+1).EquationId();
EquationId[2]=itNode->GetDof(VELOCITY_Z,xpos+2).EquationId();
for (unsigned int i = 0; i< neighSize; i++)
{
dNdXi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+1];
dNdZi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+2];
RHS_Contribution[firstCol] += -nodalVolume * (dNdXi*sigmaXX + dNdYi*sigmaXY + dNdZi*sigmaXZ);
RHS_Contribution[firstCol+1]+= -nodalVolume * (dNdYi*sigmaYY + dNdXi*sigmaXY + dNdZi*sigmaYZ);
RHS_Contribution[firstCol+2]+= -nodalVolume * (dNdZi*sigmaZZ + dNdXi*sigmaXZ + dNdYi*sigmaYZ);
for (unsigned int j = 0; j< neighSize; j++)
{
dNdXj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+1];
dNdZj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+2];
LHS_Contribution(firstRow,firstCol) += nodalVolume * ( (FourThirds * secondLame + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi )* secondLame )*theta;
LHS_Contribution(firstRow,firstCol+1) += nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * secondLame )*theta;
LHS_Contribution(firstRow,firstCol+2) += nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * secondLame )*theta;
LHS_Contribution(firstRow+1,firstCol) += nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * secondLame )*theta;
LHS_Contribution(firstRow+1,firstCol+1)+= nodalVolume * ( (FourThirds * secondLame + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi )* secondLame )*theta;
LHS_Contribution(firstRow+1,firstCol+2)+= nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * secondLame )*theta;
LHS_Contribution(firstRow+2,firstCol) += nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * secondLame )*theta;
LHS_Contribution(firstRow+2,firstCol+1)+= nodalVolume * ( (nTwoThirds * secondLame + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * secondLame )*theta;
LHS_Contribution(firstRow+2,firstCol+2)+= nodalVolume * ( (FourThirds * secondLame + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi )* secondLame )*theta;
firstRow+=3;
}
firstRow=0;
firstCol+=3;
if(i<neighb_nodes.size()){
EquationId[firstCol] =neighb_nodes[i].GetDof(VELOCITY_X,xpos).EquationId();
EquationId[firstCol+1]=neighb_nodes[i].GetDof(VELOCITY_Y,xpos+1).EquationId();
EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xpos+2).EquationId();
}
}
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
}
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
        // Prints information about the linear system solver
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//provide physical data as needed
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
{
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
        // Prints information about the linear system solver
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
Timer::Start("Build");
/* boost::timer m_build_time; */
BuildNodally(pScheme, rModelPart, A, b);
/* std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl; */
Timer::Stop("Build");
// ApplyPointLoads(pScheme,rModelPart,b);
      // Does nothing: Dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
/* boost::timer m_solve_time; */
SystemSolveWithPhysics(A, Dx, b, rModelPart);
/* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
     * @brief Builds the list of the DofSets involved in the problem by "asking" each element
     * and condition for its Dofs.
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
#pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
        // gets the list of Dofs involved in every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
ConditionsArrayType& pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
        // gets the list of Dofs involved in every condition
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
      //here we do a tree reduction so that everything ends up on thread 0
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5*static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5*static_cast<double>(old_max));
}
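      // Illustrative merge pattern (a worked example) for nthreads == 5:
      //   pass 1 (new_max = 3): list 0 <- list 3, list 1 <- list 4
      //   pass 2 (new_max = 2): list 0 <- list 2
      //   pass 3 (new_max = 1): list 0 <- list 1
      // after which dofs_aux_list[0] holds every dof.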
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(it->get());
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
      // Throws an exception if there are no degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef _OPENMP
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
      // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
      // Set equation ids for the degrees of freedom:
      // the free degrees of freedom are positioned at the beginning of the system,
      // while the fixed ones are at the end (in opposite order).
      //
      // That means that if an EquationId is greater than "mEquationSystemSize"
      // the pointed degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
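    // Worked example (illustrative): with 5 dofs where dofs #1 and #3 are
    // fixed, the loop assigns free ids 0,1,2 to dofs #0,#2,#4 and fixed ids
    // 4,3 to dofs #1,#3 (counting down from 5). mEquationSystemSize is then
    // 3, and any EquationId >= 3 denotes a restrained dof.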
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
      /* boost::timer m_construct_matrix; */
      if (pA == NULL) //if the pointer is not initialized, initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
      if (pDx == NULL) //if the pointer is not initialized, initialize it to an empty vector
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
      if (pb == NULL) //if the pointer is not initialized, initialize it to an empty vector
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
      if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized, initialize it to an empty vector
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
/* KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); */
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
if (BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
}
/* std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl; */
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
     * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely
     * inexpensive depending on the implementation chosen and on how the System Matrix is built.
     * @details For an explanation of how it works for a particular implementation, the user
     * should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
#ifdef _OPENMP
,std::vector< omp_lock_t >& lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
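    // Note: with OpenMP enabled, one lock per matrix row (lock_array[i_global])
    // allows several threads to assemble distinct rows of A and entries of b
    // concurrently; a thread blocks only when two contributions target the
    // same global row.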
//**************************************************************************
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ModelPart& rModelPart)
{
/* std::cout<<" ConstructMatrixStructure for Momentum equation"<<std::endl; */
      //filling the matrix with zeros (creating the structure)
Timer::Start("MatrixStructure");
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
#ifdef USE_GOOGLE_HASH
std::vector<google::dense_hash_set<std::size_t> > indices(equation_size);
const std::size_t empty_key = 2 * equation_size + 10;
#else
std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#endif
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
#ifdef USE_GOOGLE_HASH
indices[iii].set_empty_key(empty_key);
#else
indices[iii].reserve(40);
#endif
}
Element::EquationIdVectorType EquationId;
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
unsigned int firstCol=0;
const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0]=itNode->GetDof(VELOCITY_X,xpos).EquationId();
EquationId[1]=itNode->GetDof(VELOCITY_Y,xpos+1).EquationId();
if(dimension==3)
EquationId[2]=itNode->GetDof(VELOCITY_Z,xpos+2).EquationId();
NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i< neighb_nodes.size(); i++)
{
firstCol+=dimension;
EquationId[firstCol] =neighb_nodes[i].GetDof(VELOCITY_X,xpos).EquationId();
EquationId[firstCol+1] =neighb_nodes[i].GetDof(VELOCITY_Y,xpos+1).EquationId();
if(dimension==3){
EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xpos+2).EquationId();
}
}
for (std::size_t i = 0; i < EquationId.size(); i++)
{
if (EquationId[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[EquationId[i]]);
#endif
auto& row_indices = indices[EquationId[i]];
for (auto it = EquationId.begin(); it != EquationId.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
}
}
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii<nconditions; iii++)
{
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->Condition_EquationId( *(i_condition.base()) , ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++)
{
if (ids[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[ids[i]]);
#endif
auto& row_indices = indices[ids[i]];
for (auto it = ids.begin(); it != ids.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[ids[i]]);
#endif
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
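      // Illustrative example (hypothetical row sizes): if the first three rows
      // have {2, 1, 3} nonzeros, the prefix sum above yields
      // Arow_indices = {0, 2, 3, 6}, matching nnz == 6.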
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
/* std::cout<<"..... ConstructMatrixStructure for Momentum equation DONE"<<std::endl; */
}
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector< omp_lock_t > mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
    inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
    {
      // linear search: append the candidate only if it is not already present
      if (std::find(v.begin(), v.end(), candidate) == v.end())
      {
        v.push_back(candidate);
      }
    }
void AssembleRHS(
TSystemVectorType& b,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
int j_global = EquationId[j_local];
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
GB_unop__sinh_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fp32_fp32)
// op(A') function: GB (_unop_tran__sinh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sinhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sinhf (z) ; \
}
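// Illustrative note: GB_CAST_OP (p, p) expands to exactly the loop body
// used in GB (_unop_apply__sinh_fp32_fp32) below:
//
//      float aij = Ax [p] ;
//      float z = aij ;
//      Cx [p] = sinhf (z) ;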
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sinh_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sinhf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sinhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sinh_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h
|
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
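// Illustrative use (a hypothetical sketch; FID, PtrLoc and Kind are
// placeholder names, not part of this header):
//
//   FileNullability &FN = NullabilityMap[FID];
//   if (FN.PointerLoc.isInvalid()) {
//     FN.PointerLoc = PtrLoc;    // first unannotated pointer in this file
//     FN.PointerKind = Kind;
//   }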
/// Keeps track of the expected type during expression parsing. The type is tied
/// to a particular token; all functions that update or consume the type take the
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients should make sure all calls to get() with the same
  /// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
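// Illustrative use (a hypothetical sketch): the parser records the expected
// type when entering a context, and code completion queries it at a token:
//
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterReturn(S, Tok.getLocation());
//   ...
//   QualType Expected = PreferredType.get(Tok.getLocation());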
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
  /// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
  /// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
    // See the comment in the overload below for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
  /// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
  /// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
  /// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
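// A minimal usage sketch (the enclosing function is hypothetical):
// temporarily enter a declaration context, with automatic restoration.
//
//   void instantiateInto(Sema &S, DeclContext *DC) {
//     Sema::ContextRAII SavedContext(S, DC); // push DC, suspend delayed diags
//     // ... here S.CurContext == DC ...
//   } // destructor pops back to the previous context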
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
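// A minimal usage sketch (the enclosing function is hypothetical; it mirrors
// how implicitly-defined special members are synthesized): enter a
// synthesized-body scope before building the body of an implicit function.
//
//   void defineImplicitMember(Sema &S, CXXMethodDecl *MD,
//                             SourceLocation UseLoc) {
//     Sema::SynthesizedFunctionScope Scope(S, MD);
//     Scope.addContextNote(UseLoc); // adds a note to diagnostic stacks
//     // ... build and attach the function body ...
//   } // destructor pops the evaluation context and function scope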
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle them separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields (see
/// ExpressionEvaluationContext::UnevaluatedAbstract).
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
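// Informal C++ source-level examples of several of these contexts
// (for orientation only):
//
//   sizeof(f())                  // the call f() is Unevaluated
//   switch (n) { case C: ... }   // C is ConstantEvaluated
//   g(x);                        // the call and 'x' are PotentiallyEvaluated
//   void h(int i = mk());        // default arg: PotentiallyEvaluatedIfUsed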
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *
getCurrentMangleNumberContext(const DeclContext *DC,
Decl *&ManglingContextDecl,
bool SkpNoODRChk = false,
bool *Forced = nullptr);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
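// Informal sketch of how the result is consumed: the low bits of the
// PointerIntPair carry the Kind alongside the method pointer.
//
//   SpecialMemberOverloadResult R(MD); // Success or NoMemberOrDeleted
//   if (R.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *Chosen = R.getMethod();
//     // ... use the selected special member ...
//   }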
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
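// A minimal usage sketch (the enclosing function is hypothetical): the FP
// state is snapshotted on entry to a compound statement and restored on exit.
//
//   void actOnCompoundStmt(Sema &S) {
//     Sema::FPContractStateRAII SaveFP(S); // snapshot S.FPFeatures
//     // ... parse statements; '#pragma STDC FP_CONTRACT' may change state ...
//   } // destructor restores the saved FPOptions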
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
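// A minimal usage sketch (the helper named in the lambda is an assumption,
// for illustration only): wrap deeply recursive work so more stack can be
// allocated when the current stack is nearly exhausted.
//
//   S.runWithSufficientStackSpace(Loc, [&] {
//     instantiateDeeplyNestedTemplate(); // hypothetical recursive helper
//   });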
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
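// A typical call site (the diagnostic ID and arguments are illustrative):
// arguments are streamed into the builder, and the diagnostic is emitted
// when the temporary SemaDiagnosticBuilder is destroyed.
//
//   S.Diag(Loc, diag::err_typecheck_invalid_operands)
//       << LHS->getType() << RHS->getType() << SourceRange(Loc);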
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom-up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we first parse the
/// `*p`, but must then check that `address of` is applied to it. This
/// requires keeping a container of all pending expressions and checking
/// whether their addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
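// A typical call (the diagnostic ID is illustrative): trailing arguments are
// forwarded into the diagnostic ahead of the type itself, and a 'true'
// return means the type was incomplete and has been diagnosed.
//
//   if (S.RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return ExprError();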
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
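// Source-level example of what this heuristic catches (illustrative):
//
//   int f(int);
//   int y = f<int>(0); // 'f' is not a template, so '<' parses as
//                      // less-than; diagnoseExprIntendedAsTemplateName
//                      // can then suggest that 'f' was meant as a template.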
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
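  // A minimal C++ sketch of the situation these kinds classify ('T' is a
  // placeholder): a tag specifier naming a non-tag declaration.
  //
  //   typedef int T;
  //   struct T *p;   // error: 'T' is a typedef, so this is classified as
  //                  // NTK_Typedef for err_tag_reference_non_tag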
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
  /// Perform an ODR-like check for C/ObjC when merging tag types from modules.
  /// Unlike for C++, we actually parse the body and reject or error out in
  /// case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
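  // A sketch of where the first two priorities come from (platform and
  // versions are placeholders):
  //
  //   __attribute__((availability(macos, introduced=10.13)))
  //   void f();   // AP_Explicit: written directly on the declaration
  //
  //   #pragma clang attribute push (__attribute__((availability(macos, introduced=10.12))), apply_to = function)
  //   void g();   // AP_PragmaClangAttribute
  //   #pragma clang attribute pop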
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
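  // A sketch of the contexts these actions correspond to (names are
  // placeholders):
  //
  //   void take(long n);
  //   long give() { return 0; }   // returning:    AA_Returning
  //   void demo() {
  //     long x = 0;               // initializing: AA_Initializing
  //     x = 1;                    // assigning:    AA_Assigning
  //     take(x);                  // passing:      AA_Passing
  //   }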
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
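  // A minimal illustration of the three outcomes ('f' is a placeholder):
  //
  //   void f(int);
  //   void f(double);   // Ovl_Overload: a different signature
  //   void f(int);      // Ovl_Match: exactly matches an existing declaration
  //
  // A lookup that finds a non-function (e.g. a variable named 'f') yields
  // Ovl_NonFunction.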
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
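  // A sketch of the lifetime question this answers ('S' is a placeholder):
  //
  //   const int &a = 1 + 1;   // temporary lifetime-extended to match 'a'
  //   struct S { int m; };
  //   const int &b = S{}.m;   // extension also covers the member subobject
  //
  // Initializers for which extension is not permitted leave the entity
  // referring to a destroyed temporary and can be diagnosed here.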
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
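  // A sketch of where each context arises ('E', 'A', 'f' are placeholders):
  //
  //   enum class E : int { a = 1 + 1 };    // CCEK_Enumerator
  //   template <int N> struct A {};
  //   A<2 + 2> x;                          // CCEK_TemplateArg
  //   void f(int i) {
  //     switch (i) { case 3: break; }      // CCEK_CaseValue
  //     if constexpr (sizeof(int) == 4) {} // CCEK_ConstexprIf
  //   }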
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
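  // A minimal sketch of a contextual implicit conversion ('Box' is a
  // placeholder): a switch condition must convert to an integral or
  // enumeration type, and a converter like the diagnoser above drives the
  // search for a suitable conversion function.
  //
  //   struct Box {
  //     operator int() const { return 7; }
  //   };
  //   void f(Box b) {
  //     switch (b) {   // contextually converted via Box::operator int()
  //     case 7: break;
  //     }
  //   }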
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
  // Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
  // Emit as a series of 'note's all template and non-template candidates
  // identified by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
      bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
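  // A sketch of what this call resolves ('Range' is a placeholder): member
  // begin()/end() found for the range expression corresponds to FRS_Success.
  //
  //   struct Range {
  //     int data[3] = {1, 2, 3};
  //     int *begin() { return data; }
  //     int *end() { return data + 3; }
  //   };
  //   void use(Range r) {
  //     for (int v : r) { (void)v; }
  //   }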
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
    /// Lookup of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
    /// Lookup of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
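  // A minimal illustration of how two of these lookups can find different
  // declarations for the same identifier ('S' is a placeholder; valid in
  // both C and C++):
  //
  //   struct S { int x; };
  //   int S = 0;
  //   struct S s;   // LookupTagName finds the struct
  //   int y = S;    // LookupOrdinaryName finds the variable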
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
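  // A sketch of declarations each non-error result can find (the '_ull',
  // '_raw', and '_tpl' suffixes are placeholders):
  //
  //   unsigned long long operator""_ull(unsigned long long v) { return v; }
  //   int operator""_raw(const char *) { return 0; }
  //   template <char...> int operator""_tpl() { return 0; }
  //
  //   auto a = 10_ull;   // LOLR_Cooked: receives the value 10
  //   auto b = 10_raw;   // LOLR_Raw: receives the spelling "10"
  //   auto c = 10_tpl;   // LOLR_Template: receives '1', '0' as template args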
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
  /// Whether we have already loaded known namespaces from an external
  /// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
    CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
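  // A minimal usage sketch (assuming 'S' is a Sema instance and 'E' an
  // expression that may contain TypoExprs; the filter shown simply accepts
  // every rebuilt expression):
  //
  //   ExprResult R = S.CorrectDelayedTyposInExpr(
  //       E, /*InitDecl=*/nullptr,
  //       [](Expr *Rebuilt) -> ExprResult { return Rebuilt; });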
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its interface
  /// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
  /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
  /// and its property has a backing ivar, returns this ivar; otherwise, returns
  /// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
  /// category match those implemented in its primary class, and warns each
  /// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
  /// - Returns instance or factory methods in the global method pool for the
  /// given selector. It checks the desired kind first; if none is found and
  /// the parameter CheckTheOther is set, it then checks the other kind. If
  /// no such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
  /// - Returns the method that best matches the given argument list, or
  /// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
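// Example usage (illustrative sketch, not part of the original header):
// a caller typically wraps a parsed expression as a full expression before
// handing it to a statement action such as ActOnForStmt below. All
// argument names here are hypothetical inputs.
//
//   FullExprArg Third = MakeFullExpr(IncrementExpr);
//   StmtResult For = ActOnForStmt(ForLoc, LParenLoc, First, Second,
//                                 Third, RParenLoc, Body);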
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
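// Example usage (illustrative sketch, not part of the original header):
// entering a compound-statement scope for the duration of one block;
// SemaRef is a hypothetical Sema instance.
//
//   {
//     Sema::CompoundScopeRAII Scope(SemaRef);
//     // ... act on the statements of the block ...
//   } // ActOnFinishOfCompoundStmt() runs here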
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
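// Example usage (illustrative sketch): popping a pushed function scope on
// early exits, then keeping it on the success path; SemaRef is a
// hypothetical Sema instance.
//
//   FunctionScopeRAII PopOnError(SemaRef);
//   if (AnalysisFailed)          // hypothetical condition
//     return StmtError();        // scope is popped automatically here
//   PopOnError.disable();        // success: leave the scope pushed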
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
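// Example (illustrative sketch): the enumerators compose as bit flags, so
// individual permissions can be tested within a combined kind.
//
//   CopyElisionSemanticsKind CESK = CES_Default;
//   bool AllowsParams = (CESK & CES_AllowParameters) != 0;          // true
//   bool AllowsExcVars = (CESK & CES_AllowExceptionVariables) != 0; // false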
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
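// Example usage (illustrative sketch): the push/pop pair brackets a region
// whose diagnostics are delayed; SemaRef is a hypothetical Sema instance.
//
//   Sema::ParsingClassState State = SemaRef.PushParsingClass();
//   // ... parse the class body, with diagnostics delayed ...
//   SemaRef.PopParsingClass(State);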
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
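// Example usage (illustrative sketch): a "could this be captured?" query
// that, per the parameters documented above, performs no capture and emits
// no diagnostics; Var and Loc are hypothetical inputs.
//
//   QualType CaptureType, DeclRefType;
//   bool Invalid = tryCaptureVariable(Var, Loc, TryCapture_Implicit,
//                                     SourceLocation(),
//                                     /*BuildAndDiagnose=*/false,
//                                     CaptureType, DeclRefType,
//                                     /*FunctionScopeIndexToStopAt=*/nullptr);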
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
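// Example (illustrative sketch): the components for
// __builtin_offsetof(T, a.b[123]) would be filled in roughly as follows;
// A and B are hypothetical IdentifierInfo pointers, and Idx is a
// hypothetical Expr pointer for the constant 123.
//
//   OffsetOfComponent Comps[3];
//   Comps[0].isBrackets = false; Comps[0].U.IdentInfo = A;   // .a
//   Comps[1].isBrackets = false; Comps[1].U.IdentInfo = B;   // .b
//   Comps[2].isBrackets = true;  Comps[2].U.E = Idx;         // [123]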
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally, no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
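// Example usage (illustrative sketch): computing the implicit specification
// for a defaulted member by folding in each operation it invokes; SemaRef,
// Loc, and CalledMD are hypothetical inputs.
//
//   ImplicitExceptionSpecification Spec(SemaRef);
//   Spec.CalledDecl(Loc, CalledMD);    // once per invoked member function
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();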
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
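// Example usage (illustrative sketch): temporarily permitting 'this' while
// working outside a member function body; SemaRef, RD, and Quals are
// hypothetical inputs.
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, Quals);
//     // ... 'this' expressions are allowed and typed against RD ...
//   } // the previous CXXThisTypeOverride is restored here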
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return 'true' if the capture failed, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted as function-style casting ("int(x)"),
/// class type construction ("ClassType(x,y,z)"),
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
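/// Illustrative new-expression forms that reach this entry point (buf,
/// Widget, a, b, and n are placeholder names):
/// \code
/// new int(42);            // no placement arguments
/// new (buf) Widget(a, b); // placement arguments
/// ::new double[n];        // UseGlobal = true, array form
/// \endcode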
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
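/// Illustrative delete-expression forms (p and arr are placeholders):
/// \code
/// delete p;      // UseGlobal = false, ArrayForm = false
/// delete[] arr;  // ArrayForm = true
/// ::delete p;    // UseGlobal = true
/// \endcode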
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
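///
/// An illustrative sketch of the Microsoft extension this parses (Derived
/// and Base are placeholder names):
/// \code
/// struct Derived : Base {
///   void f() { __super::f(); } // '__super::' handled here
/// };
/// \endcode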
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in that case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
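///
/// An illustrative form (Outer, Inner, and T are placeholder names):
/// \code
/// typename Outer::template Inner<T>::type x; // 'Inner<T>::' handled here
/// \endcode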
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
TypeSourceInfo *MethodType, SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Optional<std::pair<unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
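///
/// Illustrative init-captures whose initializers are analyzed here
/// (compute and obj are placeholder names):
/// \code
/// auto L = [x = compute(), &r = obj] { return x; };
/// \endcode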
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
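/// For example, given a lambda such as (placeholder names; x is assumed to
/// be a local variable in scope):
/// \code
/// auto L = [&x, y = 0](int a) mutable -> int { return x + y + a; };
/// \endcode
/// the introducer '[&x, y = 0]' and parameter list '(int a)' are analyzed
/// here before the body is parsed.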
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
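/// A rough sketch of the equivalent explicit annotation (Vec and VecIter
/// are placeholders for a std::-like container of int and its iterator):
/// \code
/// class [[gsl::Owner(int)]] Vec { /* ... */ };
/// class [[gsl::Pointer(int)]] VecIter { /* ... */ };
/// \endcode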
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the overriding
/// function's exception spec is a subset of the overridden function's.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
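/// An illustrative sketch of what is checked (placeholder names):
/// \code
/// struct B { virtual void f(); };
/// struct D : B {
///   void f() override; // OK: overrides B::f
///   void g() override; // error: marked 'override' but overrides nothing
/// };
/// \endcode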
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
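///
/// An illustrative sketch (placeholder names):
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: overrides 'final' function B::f
/// \endcode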
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
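///
/// An illustrative sketch (Pair is a placeholder name):
/// \code
/// template<typename T, typename U> struct Pair { T first; U second; };
/// template<typename T, typename U>
/// Pair(T, U) -> Pair<T, U>; // 'Pair' is a deduction-guide name here
/// \endcode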
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
ConceptDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
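///
/// An illustrative diagnostic site (f, g, and ts are placeholder names):
/// \code
/// template<typename... Ts> void f(Ts... ts) {
///   g(ts);    // error: unexpanded parameter pack 'ts' (UPPC_Expression)
///   g(ts...); // OK: pack is expanded
/// }
/// \endcode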
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
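  ///
  /// For example (an illustrative snippet):
  /// \code
  /// template<typename ...Ts> void f(Ts ...ts); // the 'Ts' before '...' is the pattern
  /// \endcode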
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
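  ///
  /// For example (an illustrative snippet, where \c args is a function
  /// parameter pack):
  /// \code
  /// g(args...) // 'args...' is the pack expansion; 'args' is its pattern
  /// \endcode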
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
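  ///
  /// For example (an illustrative snippet), in
  /// \code
  /// template<typename ...Ts, typename ...Us>
  /// void f(std::pair<Ts, Us> ...ps);
  /// \endcode
  /// the packs \c Ts and \c Us must expand to the same number of elements;
  /// otherwise this routine reports the length mismatch as an error.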
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
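  ///
  /// For example (an illustrative snippet):
  /// \code
  /// template<typename ...Ts> constexpr unsigned N = sizeof...(Ts);
  /// \endcode
  /// When \c Ts has already been substituted with a known argument pack, the
  /// number of elements can be computed without expanding the pack.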
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
  /// A function argument from which we performed template argument
  /// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
  /// Substitute \p Replacement for \c auto in \p TypeWithAuto.
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
  /// Substitute \p Replacement for \c auto in \p TypeWithAuto.
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
    /// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
      /// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
      /// Added for template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it were not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
    /// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth, we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
  /// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
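  ///
  /// Typical usage (an illustrative sketch, where \c I is a hypothetical
  /// element index):
  /// \code
  /// ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
  /// // ... substitutions here use the I'th element of each parameter pack
  /// \endcode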
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
  /// produces an error and \c isInvalid() returns \c true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr]p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
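  ///
  /// Typical usage (an illustrative sketch):
  /// \code
  /// SFINAETrap Trap(*this);
  /// // ... perform a substitution that may fail ...
  /// if (Trap.hasErrorOccurred()) {
  ///   // treat the candidate as non-viable rather than emitting an error
  /// }
  /// \endcode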
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
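  /// RAII scope that, when enabled, saves the sets of pending instantiations
  /// and vtable uses on entry and restores them on exit, so that the
  /// instantiations queued in between can be performed eagerly.
  ///
  /// Typical usage (an illustrative sketch):
  /// \code
  /// {
  ///   GlobalEagerInstantiationScope Scope(*this, /*Enabled=*/true);
  ///   // ... code that may queue instantiations and vtable uses ...
  ///   Scope.perform(); // define used vtables and run pending instantiations
  /// }
  /// \endcode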
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
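  ///
  /// Typical usage (an illustrative sketch; \c Info, \c EPI, and \c NumParams
  /// are hypothetical locals):
  /// \code
  /// ExtParameterInfoBuilder ParamInfos;
  /// ParamInfos.set(/*index=*/0, Info);
  /// EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
  /// \endcode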
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
    /// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
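  ///
  /// For example (an illustrative snippet), substituting into the argument
  /// list of
  /// \code
  /// f(args...)
  /// \endcode
  /// expands the pack \c args so that \p Outputs receives one expression per
  /// pack element.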
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
  /// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
  /// \param property The property declaration being processed.
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
      // from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
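  ///
  /// For example (an illustrative snippet; \c obj is a hypothetical local):
  /// \code
  /// [super init];       // ObjCSuperMessage
  /// [obj description];  // ObjCInstanceMessage
  /// [NSString string];  // ObjCClassMessage ('NSString' is a type name)
  /// \endcode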
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
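// For illustration only (slot label hypothetical), the forms handled here
// include:
//   #pragma pack(4)            // set the maximum field alignment
//   #pragma pack(push, r1, 8)  // push the current value under label 'r1'
//   #pragma pack(pop, r1)      // pop back to the state labeled 'r1'
//   #pragma pack()             // restore the default alignment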
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
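// For illustration only, typical well-formed comment pragmas look like:
//   #pragma comment(lib, "ws2_32.lib")
//   #pragma comment(linker, "/DEFAULTLIB:msvcrt.lib")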
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well-formed \#pragma GCC visibility push(...) and pop.
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
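// For illustration only (names hypothetical):
//   #pragma weak weak_fn = strong_fn  // makes weak_fn a weak alias of strong_fn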
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
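// For illustration only, the pragma state tracked here looks like:
//   #pragma clang optimize off
//   void f() {}  // f is compiled with optimizations disabled
//   #pragma clang optimize on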
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
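// For illustration only, a hypothetical caller could associate the OpenCL
// double type with its required extension:
//   setOpenCLExtensionForType(DoubleTy, "cl_khr_fp64");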
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If the current OpenCL extension
/// is empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If the current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function
/// during device compilation.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function
/// during host compilation.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Struct to store the context selectors info for declare variant directive.
struct OpenMPDeclareVariantCtsSelectorData {
OMPDeclareVariantAttr::CtxSelectorSetType CtxSet =
OMPDeclareVariantAttr::CtxSetUnknown;
OMPDeclareVariantAttr::CtxSelectorType Ctx =
OMPDeclareVariantAttr::CtxUnknown;
StringRef ImplVendor;
ExprResult CtxScore;
explicit OpenMPDeclareVariantCtsSelectorData() = default;
explicit OpenMPDeclareVariantCtsSelectorData(
OMPDeclareVariantAttr::CtxSelectorSetType CtxSet,
OMPDeclareVariantAttr::CtxSelectorType Ctx, StringRef ImplVendor,
ExprResult CtxScore)
: CtxSet(CtxSet), Ctx(Ctx), ImplVendor(ImplVendor), CtxScore(CtxScore) {
}
};
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a declare target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
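// For illustration only: the nesting level is incremented by each
//   #pragma omp declare target
// and decremented by the matching
//   #pragma omp end declare target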
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks the '\#pragma omp declare variant' variant function and the
/// original function after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant reference expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
const Sema::OpenMPDeclareVariantCtsSelectorData &Data);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
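// For illustration only, a clause such as
//   schedule(monotonic: dynamic, 4)
// supplies modifier M1 = monotonic, Kind = dynamic, and ChunkSize = 4.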
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
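// For illustration only (variable name hypothetical), a clause such as
//   reduction(+ : sum)
// arrives here with ReductionId naming operator '+' and VarList = { sum }.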
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
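// For illustration only (operand 'x' hypothetical):
//   (int)x               -> CCK_CStyleCast
//   int(x)               -> CCK_FunctionalCast
//   static_cast<int>(x)  -> CCK_OtherCast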
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion,
/// performed on the function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
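// For illustration only (names hypothetical), for a call to an
// unprototyped function:
//   void f();
//   f('a', 1.0f);  // 'a' is promoted to int, 1.0f to double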
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
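// For illustration only: passing an int through '...' is VAK_Valid, while
// passing a class with a nontrivial copy constructor is roughly
// VAK_ValidInCXX11 (conditionally supported) in C++11 and VAK_Undefined
// in C++98.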
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
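// For illustration only: in 'i + 2.0' an int operand converts to double,
// and in 'i + 1u' an int operand converts to unsigned int (C99 6.3.1.8).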
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
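// A typical use of the assignment checks above (a sketch; the variable
// names are illustrative, not lifted from any particular caller):
//
//   Sema::AssignConvertType ConvTy =
//       S.CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
//                                  RHS.get()->getType(), RHS.get(),
//                                  AA_Assigning))
//     return ExprError();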
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
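// A sketch of how the parser typically drives the condition API below
// (illustrative only; the locations and variable names are assumptions):
//
//   Sema::ConditionResult Cond = Actions.ActOnCondition(
//       getCurScope(), IfLoc, CondExpr, Sema::ConditionKind::Boolean);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ;  // for 'if constexpr', the untaken branch can be discarded here.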
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition expression, or an invalid result if
/// there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Convert the given expression to bool,
/// returning an invalid result if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the verified expression
/// on success, an invalid result on failure. Can optionally return the
/// value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the verified width expression on success, an invalid result on
/// failure. Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
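// These counters back the force_cuda_host_device pragma, e.g.:
//
//   #pragma clang force_cuda_host_device begin
//   void treated_as_host_device();
//   #pragma clang force_cuda_host_device end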
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with
/// the parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
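// For reference, a launch written as
//
//   kernel<<<Grid, Block>>>(Args);
//
// is lowered so that the configuration function named here (e.g.
// cudaConfigureCall for older CUDA runtimes, __cudaPushCallConfiguration
// for newer ones) runs before the kernel stub is invoked.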
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
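// Such tags come from the type_tag_for_datatype attribute; an MPI-style
// example (illustrative declarations, not part of this file):
//
//   static const int mpi_int_tag
//       __attribute__((type_tag_for_datatype(mpi, int)));
//   void send_like(void *buf, int tag)
//       __attribute__((pointer_with_type_tag(mpi, 1, 2)));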
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes it. This is
/// used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
O10BlkRed.c
|
#include <mpi.h>
#include "grid.h"
extern char *restrict levelMask;
extern char *restrict * restrict levMsk;
extern GVAL *restrict vcflMax;
extern GVAL vcflMaxVal;
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_dvg;
void O10BlkRed(GRID * g)
{
{
        size_t blk_per_rank = ((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size;
        size_t first_rank = 0 / blk_per_rank;   /* generated from start offset 0; always rank 0 */
        size_t last_rank = (g->cBlkCnt - 1) / blk_per_rank;     /* rank owning the last block */
        size_t min_block = g->mpi_rank == first_rank ? 0 % blk_per_rank : 0;
        size_t max_block;
        if (g->mpi_rank < first_rank || g->mpi_rank > last_rank)
            max_block = 0;
        else if (g->mpi_rank == last_rank)
            max_block = g->cBlkCnt % blk_per_rank ? g->cBlkCnt % blk_per_rank : blk_per_rank;
        else
            max_block = blk_per_rank;
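        /*
         * Worked example (a sketch with illustrative values): with
         * cBlkCnt = 10 and mpi_world_size = 4, blk_per_rank = 3, so ranks
         * 0..2 each iterate local blocks [0, 3), and the last owning rank
         * (rank 3) iterates [0, 1), since 10 % 3 == 1.
         */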
#pragma omp parallel for
for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
for (size_t height_index = (0); height_index < (g->height); height_index++) {
for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
if (gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] > 0.0) {
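                        /* Note: levMsk and levelMask are shared across threads;
                         * different blocks can write the same
                         * levelMask[height_index]. All writers store the value 1,
                         * but strictly speaking this is still a data race under
                         * the memory model. */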
levMsk[block_index][height_index] = 1;
levelMask[height_index] = 1;
vcflMax[block_index] = vcflMax[block_index] > gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] ? vcflMax[block_index] : gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)];
}
}
}
}
}
for (int b = 0; b < g->cBlkCnt; b++) {
vcflMaxVal = vcflMaxVal > vcflMax[b] ? vcflMaxVal : vcflMax[b];
}
}
|
for_misc_messages.c
|
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -triple x86_64-unknown-unknown -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -triple x86_64-unknown-unknown -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -triple x86_64-unknown-unknown -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -triple x86_64-unknown-unknown -verify=expected,omp50 %s -Wuninitialized
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for foo
void test_no_clause() {
int i;
#pragma omp for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp for' must be a for loop}}
#pragma omp for
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for foo bar
for (i = 0; i < 16; ++i)
;
// At one time, this failed an assert.
// expected-error@+1 {{unexpected OpenMP clause 'num_teams' in directive '#pragma omp for'}}
#pragma omp for num_teams(3)
for (i = 0; i < 16; ++i)
;
// At one time, this error was reported twice.
// expected-error@+1 {{unexpected OpenMP clause 'uniform' in directive '#pragma omp for'}}
#pragma omp for uniform
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{unexpected OpenMP clause 'if' in directive '#pragma omp for'}}
#pragma omp for if(0)
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp parallel
#pragma omp for linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
#pragma omp for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp for collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp for collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for collapse(2)
for (i = 0; i < 16; ++i) // expected-note {{defined as private}}
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp for
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
#pragma omp for order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected '(' after 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp for order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp for order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp for order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
for (int i = 0; i < 10; ++i)
;
#pragma omp for ordered order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for'}} omp50-error {{'order' clause with 'concurrent' modifier cannot be specified if an 'ordered' clause is specified}} omp50-note {{'ordered' clause}}
for (int i = 0; i < 10; ++i)
;
}
|
compute.c
|
#include <stdlib.h>
#include <omp.h>
#include "compute.h"
tsp_solution_t* compute(tsp_search_t* global_search, int ncores)
{
/**
     * Expand the global_search list if there are too few nodes.
     * This will only happen for small values of N, so it should not
     * become a bottleneck.
*/
while (global_search->list->length > 0 && global_search->list->length < ncores)
tsp_search_iterate(global_search, TSP_SEARCH_BREADTH_FIRST);
tsp_search_t** all_local_searches = (tsp_search_t**) malloc(ncores * sizeof(tsp_search_t*));
for (int core = 0; core < ncores; core++)
{
all_local_searches[core] = (tsp_search_t*) malloc(sizeof(tsp_search_t));
all_local_searches[core]->problem = global_search->problem;
all_local_searches[core]->optimum = tsp_solution_cpy(global_search->optimum);
all_local_searches[core]->list = list_new(NULL);
}
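    /* Deal the remaining frontier nodes round-robin across the per-core searches. */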
int core = 0;
while (global_search->list->length)
{
list_enqueue(all_local_searches[core]->list, list_dequeue(global_search->list));
core = (core + 1) % ncores;
}
/**
     * Expand search tree nodes depth-first to try to save some RAM.
* https://www.quora.com/Why-is-DFS-usually-more-space-efficient-than-BFS
*/
#pragma omp parallel num_threads(ncores)
{
int my_core = omp_get_thread_num();
tsp_search_t* my_local_search = all_local_searches[my_core];
while (my_local_search->list->length)
tsp_search_iterate(my_local_search, TSP_SEARCH_DEPTH_FIRST);
}
tsp_solution_t* global_optimum = all_local_searches[0]->optimum;
for (int core = 1; core < ncores; core++)
if (!global_optimum)
global_optimum = all_local_searches[core]->optimum;
else if (all_local_searches[core]->optimum)
if (all_local_searches[core]->optimum->cost < global_optimum->cost)
global_optimum = all_local_searches[core]->optimum;
if (global_search->optimum)
tsp_solution_del(global_search->optimum);
global_search->optimum = tsp_solution_cpy(global_optimum);
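    /* Format the global optimum as a string; this appears intended for
     * logging, though the buffer is not used below. */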
char my_local_optimum_string[TSP_SOLUTION_STRING_MAX];
tsp_solution_to_string(global_search->optimum, my_local_optimum_string);
for (int core = 0; core < ncores; core++)
{
if (all_local_searches[core]->optimum)
tsp_solution_del(all_local_searches[core]->optimum);
list_del(all_local_searches[core]->list);
free(all_local_searches[core]);
}
free(all_local_searches);
return global_search->optimum;
}
|
a12_bad.c
|
#include <stdio.h>
#define N 1000000
#define MAX 4
int a[N],b[N],ind[N];
long long s=0;
int main()
{
int i;
  /* initialization, not in parallel */
for(i=0;i<N;i++)
{
a[i]=1;
b[i]=2;
ind[i]=i%MAX;
}
#pragma omp parallel for
for (i=0;i<N;i++)
b[ind[i]] += a[i];
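  /*
   * The loop above has a data race: different iterations can update the same
   * b[ind[i]] concurrently, since ind[i] takes only MAX distinct values. A
   * minimal fix, assuming the serial result is the intended semantics, is to
   * make the update atomic:
   *
   *   #pragma omp parallel for
   *   for (i = 0; i < N; i++)
   *   {
   *     #pragma omp atomic
   *     b[ind[i]] += a[i];
   *   }
   */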
for (i=0;i<MAX;i++)
{
printf("Valor %d, de b %d \n",i,b[i]);
s+=b[i];
}
printf("Suma total de b: %ld\n",s);
}
|
GB_unop__log10_fp32_fp32.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log10_fp32_fp32)
// op(A') function: GB (_unop_tran__log10_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log10f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log10f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log10f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG10 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__log10_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log10f (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log10f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__log10_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
text_parser.h
|
/*!
* Copyright (c) 2015 by Contributors
* \file text_parser.h
* \brief iterator parser to parse text format
* \author Tianqi Chen
*/
#ifndef DMLC_DATA_TEXT_PARSER_H_
#define DMLC_DATA_TEXT_PARSER_H_
#include <dmlc/data.h>
#include <dmlc/omp.h>
#include <vector>
#include <cstring>
#include <algorithm>
#include <exception>
#include <mutex>
#include "./row_block.h"
#include "./parser.h"
namespace dmlc {
namespace data {
/*!
* \brief Text parser that parses the input lines
* and returns rows in input data
*/
template <typename IndexType>
class TextParserBase : public ParserImpl<IndexType> {
public:
explicit TextParserBase(InputSplit *source,
int nthread)
: bytes_read_(0), source_(source) {
int maxthread = std::max(omp_get_num_procs() / 2 - 4, 1);
nthread_ = std::min(maxthread, nthread);
}
virtual ~TextParserBase() {
delete source_;
}
virtual void BeforeFirst(void) {
source_->BeforeFirst();
}
virtual size_t BytesRead(void) const {
return bytes_read_;
}
virtual bool ParseNext(std::vector<RowBlockContainer<IndexType> > *data) {
return FillData(data);
}
protected:
/*!
* \brief parse data into out
* \param begin beginning of buffer
* \param end end of buffer
*/
virtual void ParseBlock(const char *begin, const char *end,
RowBlockContainer<IndexType> *out) = 0;
/*!
* \brief read in next several blocks of data
* \param data vector of data to be returned
* \return true if the data is loaded, false if reach end
*/
inline bool FillData(std::vector<RowBlockContainer<IndexType>> *data);
/*!
   * \brief start from bptr, go backward and find the first end-of-line
* \param bptr end position to go backward
* \param begin the beginning position of buffer
   * \return position of the first end-of-line going backward; returns begin if not found
*/
static inline const char *BackFindEndLine(const char *bptr, const char *begin) {
for (; bptr != begin; --bptr) {
if (*bptr == '\n' || *bptr == '\r')
return bptr;
}
return begin;
}
/*!
* \brief Ignore UTF-8 BOM if present
* \param begin reference to begin pointer
* \param end reference to end pointer
*/
static inline void IgnoreUTF8BOM(const char **begin, const char **end) {
int count = 0;
for (count = 0; *begin != *end && count < 3; count++, ++*begin) {
if (!begin || !*begin)
break;
if (**begin != '\xEF' && count == 0)
break;
if (**begin != '\xBB' && count == 1)
break;
if (**begin != '\xBF' && count == 2)
break;
}
if (count < 3)
*begin -= count;
}
private:
// nthread
int nthread_;
  // number of bytes read
size_t bytes_read_;
// source split that provides the data
InputSplit *source_;
// exception_ptr to hold exception thrown in OMP threads
std::exception_ptr parser_exception_;
// mutex for the exception_ptr
std::mutex mutex_exception_;
};
// implementation
template <typename IndexType>
inline bool TextParserBase<IndexType>::FillData(
std::vector<RowBlockContainer<IndexType> > *data) {
InputSplit::Blob chunk;
if (!source_->NextChunk(&chunk)) return false;
const int nthread = omp_get_max_threads();
// reserve space for data
data->resize(nthread);
bytes_read_ += chunk.size;
CHECK_NE(chunk.size, 0U);
const char *head = reinterpret_cast<char *>(chunk.dptr);
#pragma omp parallel num_threads(nthread)
{
try {
// threadid
int tid = omp_get_thread_num();
size_t nstep = (chunk.size + nthread - 1) / nthread;
size_t sbegin = std::min(tid * nstep, chunk.size);
size_t send = std::min((tid + 1) * nstep, chunk.size);
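        // Align each thread's [sbegin, send) byte range to line boundaries:
        // scan backward from each split point to the previous newline so no
        // line is parsed by two threads; the last thread takes the chunk tail.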
const char *pbegin = BackFindEndLine(head + sbegin,
head);
const char *pend;
if (tid + 1 == nthread) {
pend = head + send;
} else {
pend = BackFindEndLine(head + send,
head);
}
ParseBlock(pbegin, pend, &(*data)[tid]);
} catch (dmlc::Error& ex) {
{
std::lock_guard<std::mutex> lock(mutex_exception_);
if (!parser_exception_) {
parser_exception_ = std::current_exception();
}
}
}
}
if (parser_exception_) {
std::rethrow_exception(parser_exception_);
}
this->data_ptr_ = 0;
return true;
}
} // namespace data
} // namespace dmlc
#endif // DMLC_DATA_TEXT_PARSER_H_
|
gbdt.h
|
/*!
* Original work Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Modified work Copyright (c) 2020 Fabio Sigrist. All rights reserved.
* Licensed under the Apache License Version 2.0 See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <string>
#include <algorithm>
#ifndef AVOID_NOT_CRAN_COMPLIANT_CALLS
#include <cstdio>
#endif
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include <LightGBM/json11.hpp>
#include "score_updater.hpp"
using namespace json11;
namespace LightGBM {
/*!
 * \brief GBDT algorithm implementation, including training, prediction, and bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
   * \brief Merge models from another boosting object, inserting them at the front of the current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
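    // Partial Fisher-Yates shuffle: permute the iteration indices within
    // [start_iter, end_iter) using a fixed seed for reproducibility.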
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
   * \param snapshot_freq frequency of snapshots
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
   * \param out_result used to store the prediction result; memory must be allocated before calling this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_preb_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
if (num_iteration > 0) {
num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
} else {
num_preb_in_one_row *= max_iteration;
}
} else if (is_pred_contrib) {
num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_preb_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
/*!
* \brief Dump model to json format string
   * \param start_iteration Iteration from which to start dumping the model
   * \param num_iteration Number of iterations to dump; -1 means dump all
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
   * \param num_iteration Number of iterations to translate; -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
   * \brief Translate model to if-else statement and save it to a file
   * \param num_iteration Number of iterations to translate; -1 means translate all
   * \param filename Filename to save to
   * \return true if the model was saved successfully
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
   * \param start_iteration Iteration from which to start saving the model
   * \param num_iterations Number of iterations to save; -1 means save all
   * \param filename Filename to save to
   * \return true if the model was saved successfully
*/
bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;
/*!
* \brief Save model to string
   * \param start_iteration Iteration from which to start saving the model
   * \param num_iterations Number of iterations to save; -1 means save all
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
   * \param num_iteration Number of models to use for feature importance; -1 means use all
   * \param importance_type 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
}
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char* SubModelName() const override { return "tree"; }
/*! \brief Nesterov schedule */
inline double NesterovSchedule(int iter, int momentum_schedule_version = 0,
double nesterov_acc_rate = 0.5, int momentum_offset = 0) const {
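    // Worked example (a sketch): with momentum_schedule_version == 1 and
    // momentum_offset == 0, the rate is 1 - 3 / (6 + iter): 0.5 at iter == 0,
    // 0.7 at iter == 4, approaching 1 as iter grows. Version 0 returns the
    // constant nesterov_acc_rate once iter >= momentum_offset.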
if (iter < momentum_offset) {
return(0.);
}
else {
if (momentum_schedule_version == 0) {
return(nesterov_acc_rate);
}
else if (momentum_schedule_version == 1) {
return(1. - (3. / (6. + iter)));
}
else {
return(0.);
}
}
}
protected:
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
   * \param iter Current iteration
*/
virtual void Bagging(int iter);
/*!
* \brief Helper function for bagging, used for multi-threading optimization
   * \param start start index of bagging
   * \param cnt count
   * \param buffer output buffer
   * \return number of elements placed in the left (in-bag) partition
*/
data_size_t BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
* \brief Helper function for bagging, used for multi-threading optimization, balanced sampling
   * \param start start index of bagging
   * \param cnt count
   * \param buffer output buffer
   * \return number of elements placed in the left (in-bag) partition
*/
data_size_t BalancedBaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
   * \brief calculate the objective function
*/
virtual void Boosting();
/*!
   * \brief Update the scores after a tree has been trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
   * \param iter Current iteration
   * \return best_msg if early stopping was triggered
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data */
  int max_feature_idx_;
  /*! \brief First-order derivatives of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second-order derivatives of training data */
std::vector<score_t> hessians_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> tmp_indices_;
/*! \brief Number of training data */
data_size_t num_data_;
  /*! \brief Number of trees per iteration */
  int num_tree_per_iteration_;
  /*! \brief Number of classes */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
  /*! \brief number of models used for prediction */
int num_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
/*! \brief number of threads */
int num_threads_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> offsets_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_write_pos_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_write_pos_buf_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
/*! \brief If true, Nesterov acceleration is used for boosting */
bool use_nesterov_acc_ = false;
/*! \brief Acceleration rate for momentum step in Nesterov step */
double nesterov_acc_rate_ = 0.5;
/*! \brief Choose the acceleration rate schedule */
int momentum_schedule_version_ = 0;
/*! \brief Acceleration rate is zero before the offset number */
int momentum_offset_ = 0;
  /*! \brief If true, a Newton update step is done for the tree leaves after the gradient step (only relevant for the GPBoost algorithm, i.e. if objective_function_->HasGPModel()==true) */
bool leaves_newton_update_ = false;
Json forced_splits_json_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
common.h
|
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <limits>
#include <string>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <iomanip>
#include <iterator>
#include <memory>
#include <sstream>
#include <type_traits>
#include <utility>
#include <vector>
#ifdef _MSC_VER
#include "intrin.h"
#endif
namespace LightGBM {
namespace Common {
inline static char tolower(char in) {
if (in <= 'Z' && in >= 'A')
return in - ('Z' - 'z');
return in;
}
inline static std::string Trim(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
return str;
}
inline static std::string RemoveQuotationSymbol(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of("'\"") + 1);
str.erase(0, str.find_first_not_of("'\""));
return str;
}
inline static bool StartsWith(const std::string& str, const std::string prefix) {
if (str.substr(0, prefix.size()) == prefix) {
return true;
} else {
return false;
}
}
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == delimiter) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> SplitLines(const char* c_str) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == '\n' || str[pos] == '\r') {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
// skip the line endings
while (str[pos] == '\n' || str[pos] == '\r') ++pos;
// new begin
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
bool met_delimiters = false;
for (int j = 0; delimiters[j] != '\0'; ++j) {
if (str[pos] == delimiters[j]) {
met_delimiters = true;
break;
}
}
if (met_delimiters) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
int sign;
T value;
while (*p == ' ') {
++p;
}
sign = 1;
if (*p == '-') {
sign = -1;
++p;
} else if (*p == '+') {
++p;
}
for (value = 0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10 + (*p - '0');
}
*out = static_cast<T>(sign * value);
while (*p == ' ') {
++p;
}
return p;
}
template<typename T>
inline static double Pow(T base, int power) {
if (power < 0) {
return 1.0 / Pow(base, -power);
} else if (power == 0) {
return 1;
} else if (power % 2 == 0) {
return Pow(base*base, power / 2);
} else if (power % 3 == 0) {
return Pow(base*base*base, power / 3);
} else {
return base * Pow(base, power - 1);
}
}
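// Worked example (a sketch): Pow(2.0, 10) == 1024.0 and Pow(2.0, -2) == 0.25;
// negative powers recurse through the reciprocal branch.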
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = NAN;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double right = 0.0;
int nn = 0;
++p;
while (*p >= '0' && *p <= '9') {
right = (*p - '0') + right * 10.0;
++nn;
++p;
}
value += right / Pow(10.0, nn);
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
tmp_str == std::string("null")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
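// Usage sketch (hedged: illustrative input, not from the code base):
//   double v;
//   const char* rest = Atof("3.14 label", &v);  // v == 3.14, rest points at "label"
// Tokens such as "na", "nan" or "null" yield NAN; "inf"/"infinity" map to
// +/-1e308 rather than a true infinity.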
inline static bool AtoiAndCheck(const char* p, int* out) {
const char* after = Atoi(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static bool AtofAndCheck(const char* p, double* out) {
const char* after = Atof(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static unsigned CountDecimalDigit32(uint32_t n) {
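  // Fast digit count: (bit_width * 1233) >> 12 approximates
  // bit_width * log10(2) (1233 / 4096 ~= 0.30103); the powers_of_10 lookup
  // corrects the estimate at exact powers of ten.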
#if defined(_MSC_VER) || defined(__GNUC__)
static const uint32_t powers_of_10[] = {
0,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000
};
#ifdef _MSC_VER
unsigned long i = 0;
_BitScanReverse(&i, n | 1);
uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
return t - (n < powers_of_10[t]) + 1;
#else
if (n < 10) return 1;
if (n < 100) return 2;
if (n < 1000) return 3;
if (n < 10000) return 4;
if (n < 100000) return 5;
if (n < 1000000) return 6;
if (n < 10000000) return 7;
if (n < 100000000) return 8;
if (n < 1000000000) return 9;
return 10;
#endif
}
inline static void Uint32ToStr(uint32_t value, char* buffer) {
const char kDigitsLut[200] = {
'0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
'1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
'2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
'3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
'4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
'5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
'6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
'7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
'8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
'9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
};
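  // Emit two decimal digits per step from the 00..99 lookup table, filling
  // the buffer from its end toward the front.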
unsigned digit = CountDecimalDigit32(value);
buffer += digit;
*buffer = '\0';
while (value >= 100) {
const unsigned i = (value % 100) << 1;
value /= 100;
*--buffer = kDigitsLut[i + 1];
*--buffer = kDigitsLut[i];
}
if (value < 10) {
*--buffer = static_cast<char>(value) + '0';
} else {
const unsigned i = value << 1;
*--buffer = kDigitsLut[i + 1];
*--buffer = kDigitsLut[i];
}
}
inline static void Int32ToStr(int32_t value, char* buffer) {
uint32_t u = static_cast<uint32_t>(value);
if (value < 0) {
*buffer++ = '-';
u = ~u + 1;
}
Uint32ToStr(u, buffer);
}
inline static void DoubleToStr(double value, char* buffer, size_t
#ifdef _MSC_VER
buffer_len
#endif
) {
#ifdef _MSC_VER
sprintf_s(buffer, buffer_len, "%.17g", value);
#else
sprintf(buffer, "%.17g", value);
#endif
}
inline static const char* SkipSpaceAndTab(const char* p) {
while (*p == ' ' || *p == '\t') {
++p;
}
return p;
}
inline static const char* SkipReturn(const char* p) {
while (*p == '\n' || *p == '\r' || *p == ' ') {
++p;
}
return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret(arr.size());
for (size_t i = 0; i < arr.size(); ++i) {
ret[i] = static_cast<T2>(arr[i]);
}
return ret;
}
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
void operator()(T value, char* buffer, size_t) const {
Int32ToStr(value, buffer);
}
};
template<typename T>
struct __TToStringHelperFast<T, true, false> {
void operator()(T value, char* buffer, size_t
#ifdef _MSC_VER
buf_len
#endif
) const {
#ifdef _MSC_VER
sprintf_s(buffer, buf_len, "%g", value);
#else
sprintf(buffer, "%g", value);
#endif
}
};
template<typename T>
struct __TToStringHelperFast<T, false, true> {
void operator()(T value, char* buffer, size_t) const {
Uint32ToStr(value, buffer);
}
};
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
__TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
const size_t buf_len = 16;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
helper(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
helper(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
const size_t buf_len = 32;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
DoubleToStr(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
DoubleToStr(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
Atoi(str.c_str(), &ret);
return ret;
}
};
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), ' ');
CHECK(strs.size() == static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return Atoi(p, out);
}
};
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
    double tmp = 0.0;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[0];
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << static_cast<int16_t>(strs[0]);
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << static_cast<int16_t>(strs[i]);
}
return str_buf.str();
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
if (end - start <= 0) {
return std::string("");
}
start = std::min(start, static_cast<size_t>(strs.size()) - 1);
end = std::min(end, static_cast<size_t>(strs.size()));
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[start];
for (size_t i = start + 1; i < end; ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
inline static int64_t Pow2RoundUp(int64_t x) {
int64_t t = 1;
for (int i = 0; i < 64; ++i) {
if (t >= x) {
return t;
}
t <<= 1;
}
return 0;
}
/*!
* \brief Do inplace softmax transformation on p_rec
* \param p_rec The input/output vector of the values.
*/
inline static void Softmax(std::vector<double>* p_rec) {
std::vector<double> &rec = *p_rec;
double wmax = rec[0];
for (size_t i = 1; i < rec.size(); ++i) {
wmax = std::max(rec[i], wmax);
}
  double wsum = 0.0;
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] = std::exp(rec[i] - wmax);
wsum += rec[i];
}
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] /= static_cast<double>(wsum);
}
}
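// Worked example (a sketch): for input {1, 2, 3} the max (3) is subtracted
// first for numerical stability, giving exp values {e^-2, e^-1, 1}, which
// normalize to approximately {0.090, 0.245, 0.665}.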
inline static void Softmax(const double* input, double* output, int len) {
double wmax = input[0];
for (int i = 1; i < len; ++i) {
wmax = std::max(input[i], wmax);
}
  double wsum = 0.0;
for (int i = 0; i < len; ++i) {
output[i] = std::exp(input[i] - wmax);
wsum += output[i];
}
for (int i = 0; i < len; ++i) {
output[i] /= static_cast<double>(wsum);
}
}
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
std::vector<const T*> ret;
for (size_t i = 0; i < input.size(); ++i) {
ret.push_back(input.at(i).get());
}
return ret;
}
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
for (size_t i = start; i < keys->size(); ++i) {
arr.emplace_back(keys->at(i), values->at(i));
}
if (!is_reverse) {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first < b.first;
});
} else {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first > b.first;
});
}
for (size_t i = start; i < arr.size(); ++i) {
keys->at(i) = arr[i].first;
values->at(i) = arr[i].second;
}
}
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
std::vector<T*> ptr(data->size());
for (size_t i = 0; i < data->size(); ++i) {
ptr[i] = data->at(i).data();
}
return ptr;
}
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
std::vector<int> ret(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ret[i] = static_cast<int>(data[i].size());
}
return ret;
}
inline static double AvoidInf(double x) {
if (std::isnan(x)) {
return 0.0;
} else if (x >= 1e300) {
return 1e300;
} else if (x <= -1e300) {
return -1e300;
} else {
return x;
}
}
inline static float AvoidInf(float x) {
if (std::isnan(x)) {
return 0.0f;
} else if (x >= 1e38) {
return 1e38f;
} else if (x <= -1e38) {
return -1e38f;
} else {
return x;
}
}
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  // The returned pointer is never dereferenced; it exists only so that
  // ParallelSort can deduce the element type of the iterator range.
  return nullptr;
}
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
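// Usage sketch (illustrative only, hypothetical data): sorts chunks on all
// available OpenMP threads and merges pairwise; ranges of 1024 elements or
// fewer fall back to a plain std::sort.
//   std::vector<int> v = /* ... */;
//   ParallelSort(v.begin(), v.end(), std::less<int>());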
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
std::ostringstream os;
os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
Log::Fatal(os.str().c_str(), callername, i);
};
for (int i = 1; i < ny; i += 2) {
if (y[i - 1] < y[i]) {
if (y[i - 1] < ymin) {
fatal_msg(i - 1);
} else if (y[i] > ymax) {
fatal_msg(i);
}
} else {
if (y[i - 1] > ymax) {
fatal_msg(i - 1);
} else if (y[i] < ymin) {
fatal_msg(i);
}
}
}
if (ny & 1) { // odd
if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
fatal_msg(ny - 1);
}
}
}
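// Example of the pairwise check above (illustrative): for y = {2, 5} with
// [ymin, ymax] = [0, 4], y[0] < y[1] holds, so only y[0] is compared against
// ymin and only y[1] against ymax; y[1] = 5 > 4 triggers fatal_msg(1). Each
// pair therefore costs three comparisons instead of four.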
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
T1 minw;
T1 maxw;
T1 sumw;
int i;
if (nw & 1) { // odd
minw = w[0];
maxw = w[0];
sumw = w[0];
i = 2;
} else { // even
if (w[0] < w[1]) {
minw = w[0];
maxw = w[1];
} else {
minw = w[1];
maxw = w[0];
}
sumw = w[0] + w[1];
i = 3;
}
for (; i < nw; i += 2) {
if (w[i - 1] < w[i]) {
minw = std::min(minw, w[i - 1]);
maxw = std::max(maxw, w[i]);
} else {
minw = std::min(minw, w[i]);
maxw = std::max(maxw, w[i - 1]);
}
sumw += w[i - 1] + w[i];
}
if (mi != nullptr) {
*mi = minw;
}
if (ma != nullptr) {
*ma = maxw;
}
if (su != nullptr) {
*su = static_cast<T2>(sumw);
}
}
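// Worked example (illustrative): for w = {3, 1, 4, 1, 5}, nw is odd, so the
// scan seeds min = max = sum = 3 and then consumes the pairs (1, 4) and
// (1, 5), ordering each pair once before updating min and max. The result is
// min = 1, max = 5, sum = 14, at roughly three comparisons per two elements.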
inline static std::vector<uint32_t> EmptyBitset(int n) {
int size = n / 32;
if (n % 32 != 0) ++size;
return std::vector<uint32_t>(size);
}
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
int i1 = val / 32;
int i2 = val % 32;
if (static_cast<int>(vec->size()) < i1 + 1) {
vec->resize(i1 + 1, 0);
}
  vec->at(i1) |= (1U << i2);  // unsigned literal: 1 << 31 would overflow a signed int
}
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
std::vector<uint32_t> ret;
for (int i = 0; i < n; ++i) {
int i1 = vals[i] / 32;
int i2 = vals[i] % 32;
if (static_cast<int>(ret.size()) < i1 + 1) {
ret.resize(i1 + 1, 0);
}
    ret[i1] |= (1U << i2);  // unsigned literal: 1 << 31 would overflow a signed int
}
return ret;
}
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
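// Usage sketch (illustrative only): round trip through the 32-bit-word
// bitset helpers; value 40 lands in word 1, bit 8.
//   uint32_t vals[] = {3, 40};
//   auto bits = ConstructBitset(vals, 2);
//   bool hit = FindInBitset(bits.data(), static_cast<int>(bits.size()), 40);  // true
//   bool miss = FindInBitset(bits.data(), static_cast<int>(bits.size()), 7);  // false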
inline static bool CheckDoubleEqualOrdered(double a, double b) {
double upper = std::nextafter(a, INFINITY);
return b <= upper;
}
inline static double GetDoubleUpperBound(double a) {
  return std::nextafter(a, INFINITY);
}
inline static size_t GetLine(const char* str) {
auto start = str;
while (*str != '\0' && *str != '\n' && *str != '\r') {
++str;
}
return str - start;
}
inline static const char* SkipNewLine(const char* str) {
if (*str == '\r') {
++str;
}
if (*str == '\n') {
++str;
}
return str;
}
template <typename T>
static int Sign(T x) {
return (x > T(0)) - (x < T(0));
}
template <typename T>
static T SafeLog(T x) {
if (x > 0) {
return std::log(x);
} else {
return -INFINITY;
}
}
inline bool CheckASCII(const std::string& s) {
for (auto c : s) {
if (static_cast<unsigned char>(c) > 127) {
return false;
}
}
return true;
}
} // namespace Common
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
8354.c
|
/* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <[email protected]>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
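/* kernel_atax computes y = A^T (A x): the first inner loop forms
   tmp[i] = (A x)[i]; the second accumulates y[j] += A[i][j] * tmp[i]. */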
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
#pragma scop
#pragma omp parallel num_threads(2)
{
#pragma omp for schedule(static, 16)
for (i = 0; i < _PB_NY; i++)
y[i] = 0;
#pragma omp for private (j) schedule(static, 16)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
      for (j = 0; j < _PB_NY; j++)
	tmp[i] = tmp[i] + A[i][j] * x[j];
      /* y[j] is written by every iteration of the parallel i loop, so the
	 accumulation must be atomic to avoid a data race across threads. */
      for (j = 0; j < _PB_NY; j++) {
#pragma omp atomic
	y[j] = y[j] + A[i][j] * tmp[i];
      }
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
no_loop_2.c
|
#include <stdio.h>
#include <omp.h>
#pragma omp declare target
int foo(int i) { return i+1; }
#pragma omp end declare target
int main()
{
int N = 1000000;
int a[N];
int b[N];
int i;
for (i=0; i<N; i++)
b[i]=i;
for (i=0; i<N; i++)
a[i]=0;
int j;
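  /* Each target region below offloads a simple array copy; the CHECK lines
     at the end of this file assert the kernel signature (SGN) and constant
     workgroup size the runtime reports for each launch. */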
#pragma omp target teams distribute parallel for
  for (j = 0; j < N; j++)
    a[j] = b[j];
#pragma omp target teams distribute parallel for
  for (int k = 0; k < N; k++)
    a[k] = b[k];
#pragma omp target teams distribute parallel for
  for (int k = 0; k < N; k++) {
    a[k] = b[k];
    foo(k);
  }
#pragma omp target teams distribute parallel for
  for (int k = 0; k < N; k++) {
    a[k] = b[k];
    omp_get_num_teams();
  }
#pragma omp target teams distribute parallel for
  for (int k = 0; k < N; k++) {
#pragma omp simd
    for (int p = 0; p < N; p++)
      a[k] = b[k];
  }
int rc = 0;
for (i=0; i<N; i++)
if (a[i] != b[i] ) {
rc++;
printf ("Wrong value: a[%d]=%d\n", i, a[i]);
}
if (!rc)
printf("Success\n");
return rc;
}
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4 ConstWGSize:512 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 512)
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4 ConstWGSize:512 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 512)
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:4 ConstWGSize:512 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 512)
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:2 ConstWGSize:512 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 512)
/// CHECK: DEVID:[[S:[ ]*]][[DEVID:[0-9]+]] SGN:2 ConstWGSize:512 args: 5 teamsXthrds:([[S:[ ]*]][[NUM_TEAMS:[0-9]+]]X 512)
|
calc_wind.c
|
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <omp.h>
#include "breshen.h"
#include "wind_header.h"
#define MAX_SIZE 500
#define PI 3.141592654
/* ------------------------------------------------------------------------- */
/*
* Define an array struct that will dynamically allocate memory
* http://stackoverflow.com/questions/3536153/c-dynamically-growing-array
*/
/*
typedef struct {
int *data;
size_t used;
size_t size;
} Array;
void initArray(Array *a, size_t initialSize) {
a->data = (int *)malloc(initialSize * sizeof(int));
a->used = 0;
a->size = initialSize;
}
void insertArray(Array *a, int element) {
if (a->used == a->size) {
a->size *= 2;
a->data = (int *)realloc(a->data, a->size * sizeof(int));
}
a->data[a->used++] = element;
}
void freeArray(Array *a) {
free(a->data);
a->data = NULL;
a->used = a->size = 0;
}
*/
void calc_maxus(nx, ny, x, y, z, X_start, Y_start, X_end, Y_end, height, nthreads, maxus)
int nx; /* number of grid cells x dir*/
int ny; /* number of grid cells y dir*/
double *x; /* X coordinates of the matrix */
double *y; /* Y coordinates of the matrix */
double *z; /* Z (elevation) of the matrix */
double *X_start; /* matrix of start point indices */
double *Y_start; /* matrix of start point indices */
double *X_end; /* matrix of end point indices */
double *Y_end; /* matrix of end point indices */
double height; /* instrument height */
int nthreads; /* number of threads for parallel processing */
double *maxus; /* output maxus */
{
int i, j, ngrid, line_length;
int start_x, start_y, end_x, end_y;
int xcoords[MAX_SIZE], ycoords[MAX_SIZE];
double mxs;
ngrid = nx * ny;
omp_set_dynamic(100); // NB: omp_set_dynamic() only enables dynamic adjustment of the
                      // number of threads (any nonzero argument means "on"); it does
                      // not hand 100 cells to each processor. Per-thread chunking
                      // would be schedule(dynamic, 100) on the loop itself.
omp_set_num_threads(nthreads); // Use N threads for all consecutive parallel regions
#pragma omp parallel shared(nx, ny, x, y, z, X_start, Y_start, X_end, Y_end, height, maxus) \
private(i, j, start_x, start_y, end_x, end_y, xcoords, ycoords, line_length, mxs)
{
#pragma omp for
for (i = 0; i < ngrid; i++) {
// Determine the start and end points
start_x = X_start[i];
start_y = Y_start[i];
end_x = X_end[i];
end_y = Y_end[i];
// int dx, dy;
// dy = 31;
// dx = 4980;
// if (start_x == dx && start_y == dy) {
// printf("x=%i, y=%i\n", start_x, start_y);
// printf("x=%i, y=%i\n", end_x, end_y);
// }
// determine the points along the line
line_length = find_line(start_x, start_y, end_x, end_y, nx, ny, xcoords, ycoords);
// determine the elevations along the line
double xl[line_length], yl[line_length], elev[line_length];
int n = 0;
for (j = 0; j < line_length; j++) {
// check to ensure that the points are within the modeling domain
if (xcoords[j] >= 0 && xcoords[j] <= nx-1 && ycoords[j] >= 0 && ycoords[j] <= ny-1){
xl[j] = x[xcoords[j]];
yl[j] = y[ycoords[j]];
elev[j] = z[ycoords[j]*nx + xcoords[j]];
// if (start_x == dx && start_y == dy) {
// printf("%f\n", elev[j]);
// }
n++;
}
}
// calculate the maximum upwind slope along the line
maxus[i] = hord(n, xl, yl, elev, height);
// if (start_x == dx && start_y == dy) {
// printf("%f\n", maxus[i]);
// }
}
}
}
/* ------------------------------------------------------------------------- */
int find_line(start_x, start_y, end_x, end_y, nx, ny, xcoords, ycoords)
int start_x; /* start x coordinate */
int start_y; /* start y coordinate */
int end_x; /* end x coordinate */
int end_y; /* end y coordinate */
int nx; /* number of points in x coordinates */
int ny; /* number of points in y coordinates */
int xcoords[]; /* array of x coordinates */
int ycoords[]; /* array of y coordinates */
{
int N;
short *X, *Y, nextX, nextY;
struct BreshenhamData *LineData, Initialize;
// Initialize the Bresenham calculation
nextY = (short) start_y;
nextX = (short) start_x;
X = &nextX;
Y = &nextY;
LineData = &Initialize;
Initialize.SlopeType = 0;
xcoords[0] = start_x;
ycoords[0] = start_y;
N = 1;
while (((int)*X != end_x || (int)*Y != end_y) && N < MAX_SIZE)
{
// Check to see if the values are within the boundaries
if (*X >= nx || *X < 0 || *Y >= ny || *Y < 0) { /* valid indices are 0..nx-1, 0..ny-1 */
break;
}
GetNextCellCoordinate((short)start_x, (short)start_y, (short)end_x,\
(short)end_y, X, Y, LineData);
xcoords[N] = nextX;
ycoords[N] = nextY;
++N;
}
return N;
}
/* ------------------------------------------------------------------------- */
/*
 * Find the maximum upwind slope from the point using the hor1d method
 * described by Dozier (1981)
 */
double hord(N, x, y, z, height)
int N; /* line length */
double *x; /* x coordinates along line */
double *y; /* y coordinates along line */
double *z; /* elevation along line */
double height; /* instrument height */
{
int i, j, found;
int H[N]; // index to current points horizon
double slope_ij, slope_hj;
double hordeg;
H[N - 1] = N - 1;
i = N - 2;
while ( i >= 0 )
{
j = i + 1;
found = 0;
while (found == 0)
{
slope_ij = slope(x[i], y[i], z[i], x[j], y[j], z[j], height);
slope_hj = slope(x[j], y[j], z[j], x[H[j]], y[H[j]], z[H[j]], height);
// slope_ij = SLOPE(i, j, xcoords, ycoords, z, height);
// slope_hj = SLOPE(j, H[j], xcoords, ycoords, z, height);
if (slope_ij < slope_hj)
{
if (j == N - 1)
{
found = 1;
H[i] = j;
}
else
{
j = H[j];
}
}
else
{
found = 1;
if (slope_ij > slope_hj) {
H[i] = j;
} else {
H[i] = H[j];
}
}
}
--i;
}
slope_hj = slope(x[0], y[0], z[0], x[H[0]], y[H[0]], z[H[0]], height);
hordeg = atan(slope_hj) / PI * 180;
// hordeg = H[0];
return hordeg;
}
double slope (x1, y1, z1, x2, y2, z2, height)
double x1; /* Point 1 x,y,z */
double y1;
double z1;
double x2; /* Point 2 x,y,z */
double y2;
double z2;
double height;
{
double rise, run;
rise = z2 - ( height + z1 );
run = sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
return (rise/run);
}
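/* Worked example (illustrative): from (x1,y1,z1) = (0,0,100) to
   (x2,y2,z2) = (30,40,110) with an instrument height of 2,
   rise = 110 - (2 + 100) = 8 and run = sqrt(30*30 + 40*40) = 50,
   so slope() returns 8/50 = 0.16. */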
//float SLOPE (l, m, xcoords, ycoords, elevs, height)
//int l;
//int m;
//int xcoords[];
//int ycoords[];
//int elevs[];
//int height;
//{
// int rise;
// float run, slop, cellsize=30;
//
// rise = elevs[m] - ( height + elevs[l] );
// // rise = 10;
// // run = 10.0;
// run = sqrt((xcoords[l] - xcoords[m]) * (xcoords[l] - xcoords[m]) + \
// (ycoords[l] - ycoords[m]) * (ycoords[l] - ycoords[m])) * cellsize;
// slop = rise/run;
// return slop;
//}
|
image_handler.h
|
#include "parameters.h"
class ImageHandler
{
public:
ros::NodeHandle nh;
ros::Publisher pub_image;
cv::Mat image_range;
cv::Mat image_noise;
cv::Mat image_intensity;
pcl::PointCloud<PointType>::Ptr cloud_track;
ImageHandler()
{
cloud_track.reset(new pcl::PointCloud<PointType>());
cloud_track->resize(IMAGE_HEIGHT * IMAGE_WIDTH);
pub_image = nh.advertise<sensor_msgs::Image>("loop_detector/image_stack", 1);
}
void cloud_handler(const sensor_msgs::PointCloud2ConstPtr &cloud_msg)
{
// convert cloud
pcl::PointCloud<PointOuster>::Ptr laser_cloud(new pcl::PointCloud<PointOuster>());
pcl::fromROSMsg(*cloud_msg, *laser_cloud);
        // NB: % and * have equal precedence, so the product must be parenthesized
        assert((int)laser_cloud->size() % (IMAGE_HEIGHT * IMAGE_WIDTH) == 0);
// reset images
image_range = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
image_noise = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
image_intensity = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
#pragma omp parallel for num_threads(NUM_THREADS)
for (int u = 0; u < IMAGE_HEIGHT; u++)
{
for (int v = 0; v < IMAGE_WIDTH; v++)
{
const auto& pt = laser_cloud->points[u * IMAGE_WIDTH + v];
// extract sensor data
float range = std::sqrt(pt.x*pt.x + pt.y*pt.y + pt.z*pt.z);
float noise = pt.noise;
float intensity = pt.intensity;
// limit to (0~255)
noise = std::min(noise, 255.0f);
intensity = std::min(intensity, 255.0f);
// update all images
image_range.at<uint8_t>(u, v) = std::min(range * 20, 255.0f);
image_noise.at<uint8_t>(u, v) = noise;
image_intensity.at<uint8_t>(u, v) = intensity;
// update cloud
PointType* p = &cloud_track->points[u * IMAGE_WIDTH + v];
if (range >= 0.1)
{
p->x = pt.x;
p->y = pt.y;
p->z = pt.z;
p->intensity = intensity;
}
else
{
p->x = p->y = p->z = p->intensity = 0;
}
}
}
if (pub_image.getNumSubscribers() != 0)
{
// option 1: display intensity image
// cv::Mat image_visualization = image_intensity.clone();
// cv::cvtColor(image_visualization, image_visualization, CV_GRAY2RGB);
// pubImage(&pub_image, image_visualization, cloud_msg->header, "bgr8");
// option 2: display all images from available lidar channels
cv::Mat image_visualization;
cv::vconcat(image_noise, image_intensity, image_visualization);
cv::vconcat(image_visualization, image_range, image_visualization);
cv::cvtColor(image_visualization, image_visualization, CV_GRAY2RGB);
cv::putText(image_visualization, "Ambient", cv::Point2f(5, 20 + IMAGE_HEIGHT*0), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
cv::putText(image_visualization, "Intensity", cv::Point2f(5, 20 + IMAGE_HEIGHT*1), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
cv::putText(image_visualization, "Range", cv::Point2f(5, 20 + IMAGE_HEIGHT*2), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
pubImage(&pub_image, image_visualization, cloud_msg->header, "bgr8");
}
// static tf in case tf between base_link and lidar is missing
static tf::TransformBroadcaster tf_base_to_lidar;
static tf::Transform base_to_lidar = tf::Transform(tf::createQuaternionFromRPY(0, 0, 0), tf::Vector3(0, 0, 0));
tf_base_to_lidar.sendTransform(tf::StampedTransform(base_to_lidar, cloud_msg->header.stamp, "base_link", "velodyne"));
}
void pubImage(ros::Publisher *this_pub, const cv::Mat& this_image, std_msgs::Header this_header, string image_format)
{
static cv_bridge::CvImage bridge;
bridge.header = this_header;
bridge.encoding = image_format;
bridge.image = this_image;
this_pub->publish(bridge.toImageMsg());
}
};
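// Usage sketch (illustrative only; the node and topic names below are
// assumptions, adjust to your setup):
//   ros::init(argc, argv, "loop_detector");
//   ImageHandler handler;
//   ros::NodeHandle nh;
//   ros::Subscriber sub = nh.subscribe("points_raw", 1,
//                                      &ImageHandler::cloud_handler, &handler);
//   ros::spin();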
|
mpncecat.c
|
/* $Header$ */
/* ncecat -- netCDF ensemble concatenator */
/* Purpose: Join variables across files into a new record variable */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard header files */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#include <unistd.h> /* POSIX stuff */
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
int
main(int argc,char **argv)
{
char **fl_lst_abb=NULL; /* Option a */
char **fl_lst_in;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char *aux_arg[NC_MAX_DIMS];
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL; /* MPI CEWI */
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */
char *rec_dmn_nm=NULL; /* [sng] New record dimension name */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567ACcD:d:FHhL:l:n:Oo:p:rRSt:u:v:X:x-:";
cnk_dmn_sct **cnk_dmn=NULL_CEWI;
dmn_sct *rec_dmn;
dmn_sct **dim;
dmn_sct **dmn_out;
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
int *in_id_arr;
int abb_arg_nbr=0;
int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_idx;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx;
int jdx;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_dmn_xtr;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int opt;
int out_id;
int rcd=NC_NOERR; /* [rcd] Return code */
int rec_dmn_id=NCO_REC_DMN_UNDEFINED;
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int var_lst_in_nbr=0;
lmt_sct **aux=NULL_CEWI; /* Auxiliary coordinate limits */
lmt_sct **lmt;
lmt_all_sct **lmt_all_lst; /* List of *lmt_all structures */
long idx_rec_out=0L; /* idx_rec_out gets incremented */
cnv_sct *cnv; /* [sct] Convention structure */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_APPEND=True; /* Option H */
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
nm_id_sct *dmn_lst;
nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out;
var_sct **var_prc;
var_sct **var_prc_out;
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */
MPI_Info mpi_nfo=MPI_INFO_NULL; /* [sct] File geometry hints */
MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */
nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */
int fl_nm_lng; /* [nbr] Output file name length */
int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */
int msg_tag_typ; /* [enm] MPI message tag type */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
int tkn_wrt_rsp; /* [enm] Response to request for write token */
int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */
int rnk_wrk; /* [idx] Worker rank */
int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"fl_lst_in",no_argument,0,'H'},
{"file_list",no_argument,0,'H'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"nintap",required_argument,0,'n'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"path",required_argument,0,'p'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"suspend", no_argument,0,'S'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"ulm_nm",required_argument,0,'u'},
{"rcd_nm",required_argument,0,'u'},
{"variable",required_argument,0,'v'},
{"auxiliary",required_argument,0,'X'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{"help",no_argument,0,'?'},
{"hlp",no_argument,0,'?'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
#ifdef ENABLE_MPI
/* MPI Initialization */
MPI_Init(&argc,&argv);
MPI_Comm_size(mpi_cmm,&prc_nbr);
MPI_Comm_rank(mpi_cmm,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Start clock and save command line */
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Create file in RAM */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'H': /* Toggle writing input file list attribute */
FL_LST_IN_APPEND=!FL_LST_IN_APPEND;
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'n': /* NINTAP-style abbreviation of files to process */
fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr);
if(abb_arg_nbr < 1 || abb_arg_nbr > 6){
(void)fprintf(stdout,"%s: ERROR Incorrect abbreviation for file list\n",nco_prg_nm);
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
} /* end if */
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
#ifdef ENABLE_MPI
case 'S': /* Suspend with signal handler to facilitate debugging */
if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm);
while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */
break;
#endif /* !ENABLE_MPI */
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'u': /* New record dimension name */
rec_dmn_nm=(char *)strdup(optarg);
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
aux_arg[aux_nbr]=(char *)strdup(optarg);
aux_nbr++;
MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case '?': /* Print proper usage */
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Make uniform list of user-specified chunksizes */
if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg);
/* Make uniform list of user-specified dimension limits */
lmt=nco_lmt_prs(lmt_nbr,lmt_arg);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Parse auxiliary coordinates */
if(aux_nbr > 0){
int aux_idx_nbr;
aux=nco_aux_evl(in_id,aux_nbr,aux_arg,&aux_idx_nbr);
if(aux_idx_nbr > 0){
lmt=(lmt_sct **)nco_realloc(lmt,(lmt_nbr+aux_idx_nbr)*sizeof(lmt_sct *));
int lmt_nbr_new=lmt_nbr+aux_idx_nbr;
int aux_idx=0;
for(int lmt_idx=lmt_nbr;lmt_idx<lmt_nbr_new;lmt_idx++) lmt[lmt_idx]=aux[aux_idx++];
lmt_nbr=lmt_nbr_new;
} /* endif aux */
} /* endif aux_nbr */
/* Get number of variables, dimensions, and record dimension ID of input file */
(void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id);
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Form initial extraction list which may include extended regular expressions */
xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr);
/* Change included variables to excluded variables */
if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Add all coordinate variables to extraction list */
if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv);
/* Extract coordinates associated with extracted variables */
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv);
/* Sort extraction list by variable ID for fastest I/O */
if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False);
/* We now have final list of variables to extract. Phew. */
/* Find coordinate/dimension values associated with user-specified limits
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV);
/* Place all dimensions in lmt_all_lst */
lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl*sizeof(lmt_all_sct *));
/* Initialize lmt_all_sct's */
(void)nco_msa_lmt_all_ntl(in_id,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl,lmt,lmt_nbr);
/* Find dimensions associated with variables to be extracted */
dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr);
/* Fill-in dimension structure for all extracted dimensions */
dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm);
/* Dimension list no longer needed */
dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr);
/* Merge hyperslab limit information into dimension structures */
if(lmt_nbr > 0) (void)nco_dmn_lmt_mrg(dim,nbr_dmn_xtr,lmt,lmt_nbr);
/* Duplicate input dimension structures for output dimension structures */
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++){
dmn_out[idx]=nco_dmn_dpl(dim[idx]);
(void)nco_dmn_xrf(dim[idx],dmn_out[idx]);
} /* end loop over idx */
/* Fill-in variable structure list for all extracted variables */
var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr);
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
/* Extraction list no longer needed */
xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
#endif /* !ENABLE_MPI */
/* Add input file list global attribute */
if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr);
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* ncecat-specific operations */
if(True){
/* Always construct new "record" dimension from scratch */
rec_dmn=(dmn_sct *)nco_malloc(sizeof(dmn_sct));
if(rec_dmn_nm == NULL) rec_dmn->nm=rec_dmn_nm=(char *)strdup("record"); else rec_dmn->nm=rec_dmn_nm;
rec_dmn->id=-1;
rec_dmn->nc_id=-1;
rec_dmn->xrf=NULL;
rec_dmn->val.vp=NULL;
rec_dmn->is_crd_dmn=False;
rec_dmn->is_rec_dmn=True;
rec_dmn->sz=0L;
rec_dmn->cnt=0L;
rec_dmn->srd=0L;
rec_dmn->srt=0L;
rec_dmn->end=rec_dmn->sz-1L;
/* Change existing record dimension, if any, to regular dimension */
for(idx=0;idx<nbr_dmn_xtr;idx++){
/* Is any input dimension a record dimension? */
if(dmn_out[idx]->is_rec_dmn){
dmn_out[idx]->is_rec_dmn=False;
break;
} /* end if */
} /* end loop over idx */
/* Add record dimension to end of dimension list */
nbr_dmn_xtr++;
    dmn_out=(dmn_sct **)nco_realloc(dmn_out,nbr_dmn_xtr*sizeof(dmn_sct *));
dmn_out[nbr_dmn_xtr-1]=rec_dmn;
} /* end if */
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Define dimensions in output file */
(void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr);
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
if(True){
/* Prepend record dimension to beginning of all vectors for processed variables */
for(idx=0;idx<nbr_var_prc;idx++){
var_prc_out[idx]->nbr_dim++;
var_prc_out[idx]->is_rec_var=True;
var_prc_out[idx]->sz_rec=var_prc_out[idx]->sz;
/* Allocate space to hold dimension IDs */
var_prc_out[idx]->dim=(dmn_sct **)nco_realloc(var_prc_out[idx]->dim,var_prc_out[idx]->nbr_dim*sizeof(dmn_sct *));
var_prc_out[idx]->dmn_id=(int *)nco_realloc(var_prc_out[idx]->dmn_id,var_prc_out[idx]->nbr_dim*sizeof(int));
var_prc_out[idx]->cnt=(long *)nco_realloc(var_prc_out[idx]->cnt,var_prc_out[idx]->nbr_dim*sizeof(long int));
var_prc_out[idx]->end=(long *)nco_realloc(var_prc_out[idx]->end,var_prc_out[idx]->nbr_dim*sizeof(long int));
var_prc_out[idx]->srd=(long *)nco_realloc(var_prc_out[idx]->srd,var_prc_out[idx]->nbr_dim*sizeof(long int));
var_prc_out[idx]->srt=(long *)nco_realloc(var_prc_out[idx]->srt,var_prc_out[idx]->nbr_dim*sizeof(long int));
/* Move current array by one to make room for new record dimension info */
(void)memmove((void *)(var_prc_out[idx]->dim+1),(void *)(var_prc_out[idx]->dim),(var_prc_out[idx]->nbr_dim-1)*sizeof(dmn_sct *));
(void)memmove((void *)(var_prc_out[idx]->dmn_id+1),(void *)(var_prc_out[idx]->dmn_id),(var_prc_out[idx]->nbr_dim-1)*sizeof(int));
(void)memmove((void *)(var_prc_out[idx]->cnt+1),(void *)(var_prc_out[idx]->cnt),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
(void)memmove((void *)(var_prc_out[idx]->end+1),(void *)(var_prc_out[idx]->end),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
(void)memmove((void *)(var_prc_out[idx]->srd+1),(void *)(var_prc_out[idx]->srd),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
(void)memmove((void *)(var_prc_out[idx]->srt+1),(void *)(var_prc_out[idx]->srt),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
/* Insert value for new record dimension */
var_prc_out[idx]->dim[0]=rec_dmn;
var_prc_out[idx]->dmn_id[0]=rec_dmn->id;
var_prc_out[idx]->cnt[0]=1L;
var_prc_out[idx]->end[0]=-1L;
var_prc_out[idx]->srd[0]=-1L;
var_prc_out[idx]->srt[0]=-1L;
} /* end loop over idx */
} /* end if */
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Define variables in output file, copy their attributes */
(void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl);
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
/* Manager obtains output filename and broadcasts to workers */
if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp);
MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,mpi_cmm);
if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char));
MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,mpi_cmm);
if(prc_rnk == rnk_mgr){ /* MPI manager code */
TKN_WRT_FREE=False;
#endif /* !ENABLE_MPI */
/* Copy variable data for non-processed variables */
(void)nco_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix);
#ifdef ENABLE_MPI
/* Close output file so workers can open it */
nco_close(out_id);
TKN_WRT_FREE=True;
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* Close first input netCDF file */
(void)nco_close(in_id);
/* Loop over input files */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
#ifdef ENABLE_MPI
MPI_Barrier(mpi_cmm);
#endif /* !ENABLE_MPI */
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\nInput file %d is %s; ",fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"local file %s:\n",fl_in);
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
#if 0
/* fxm: netCDF4: Change to independent variable reads? */
#ifdef ENABLE_NETCDF4
rcd=nco_open_par(fl_in,NC_MPIIO|NC_NETCDF4,mpi_cmm,mpi_nfo,&in_id);
#endif /* !ENABLE_NETCDF4 */
#endif /* !0 */
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
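#ifdef ENABLE_MPI
    /* Manager/worker protocol sketch (summary of the code below): workers
       request variables with msg_tag_wrk_rqs and the manager replies with the
       next variable index (or idx_all_wrk_ass once none remain); before
       writing, a worker polls for the single write token with
       msg_tag_tkn_wrt_rqs, sleeping and retrying on tkn_wrt_rqs_dny, so only
       one rank has the shared output file open for writing at a time */
#endif /* !ENABLE_MPI */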
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,mpi_cmm,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
TKN_WRT_FREE=True;
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,mpi_cmm);
/* msg_tag_typ != msg_tag_wrk_rqs */
}else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,mpi_cmm);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
wrk_id_bfr[0]=prc_rnk;
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,mpi_cmm);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,mpi_cmm,&mpi_stt);
idx=msg_bfr[0];
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass) break;
else{
var_prc_out[idx]->id=msg_bfr[2];
/* Process this variable same as UP code */
#else /* !ENABLE_MPI */
/* OpenMP with threading over variables, not files */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(nco_dbg_lvl,fl_nbr,idx_rec_out,in_id_arr,nbr_var_prc,out_id,var_prc,var_prc_out,lmt_all_lst,nbr_dmn_fl,jdx)
#endif /* !_OPENMP */
/* Process all variables in current file */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* !ENABLE_MPI */
/* Common code for UP and MPI */ /* fxm: requires C99 as is? */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"%s, ",var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Variables may have different ID, missing_value, type, in each file */
(void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Size of record dimension is 1 in output file */
var_prc_out[idx]->cnt[0]=1L;
var_prc_out[idx]->srt[0]=idx_rec_out;
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,mpi_cmm);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,mpi_cmm,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
#endif /* !ENABLE_MPI */
{ /* begin OpenMP critical */
/* Write variable into current record in output file */
if(var_prc[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc[idx]->type);
} /* end if variable is array */
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end OpenMP critical */
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end else !idx_all_wrk_ass */
} /* end while loop requesting work/token */
} /* endif Worker */
#else /* !ENABLE_MPI */
} /* end (OpenMP parallel for) loop over idx */
#endif /* !ENABLE_MPI */
idx_rec_out++; /* [idx] Index of current record in output file (0 is first, ...) */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
#ifdef ENABLE_MPI
MPI_Barrier(mpi_cmm);
#endif /* !ENABLE_MPI */
} /* end loop over fl_idx */
#ifdef ENABLE_MPI
/* Manager moves output file (closed by workers) from temporary to permanent location */
if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out);
#else /* !ENABLE_MPI */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
#endif /* end !ENABLE_MPI */
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncecat-specific memory cleanup */
if(rec_dmn_nm) rec_dmn_nm=(char *)nco_free(rec_dmn_nm);
/* NB: free lmt[] is now referenced within lmt_all_lst[idx] */
for(idx=0;idx<nbr_dmn_fl;idx++)
for(jdx=0;jdx<lmt_all_lst[idx]->lmt_dmn_nbr;jdx++)
lmt_all_lst[idx]->lmt_dmn[jdx]=nco_lmt_free(lmt_all_lst[idx]->lmt_dmn[jdx]);
lmt=(lmt_sct**)nco_free(lmt);
if(nbr_dmn_fl > 0) lmt_all_lst=nco_lmt_all_lst_free(lmt_all_lst,nbr_dmn_fl);
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]);
if(aux_nbr > 0) aux=(lmt_sct **)nco_free(aux);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr);
/* Free dimension lists */
if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr-1); /* NB: ncecat has one fewer input than output dimension */
if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr);
/* Free variable lists */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
} /* !flg_mmr_cln */
#ifdef ENABLE_MPI
MPI_Finalize();
#endif /* !ENABLE_MPI */
if(rcd != NC_NOERR) nco_err_exit(rcd,"main");
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
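/* A minimal stand-alone sketch (illustration only, hence the #if 0 guard) of
   the manager-side write-token arbitration implemented above: grant the token
   when free, deny otherwise, and reply to whichever rank asked. Tag and token
   values here are placeholders, not the NCO constants. */
#if 0
#include <mpi.h>
enum{tag_tkn_rqs=1,tag_tkn_rsp=2}; /* [enm] Illustrative message tags */
enum{tkn_dny=0,tkn_xcp=1}; /* [enm] Deny/accept write token */
void tkn_arb(MPI_Comm cmm,int *tkn_wrt_free)
{
  int msg[1];
  MPI_Status stt;
  /* Block until any worker requests the write token */
  MPI_Recv(msg,1,MPI_INT,MPI_ANY_SOURCE,tag_tkn_rqs,cmm,&stt);
  if(*tkn_wrt_free){*tkn_wrt_free=0;msg[0]=tkn_xcp;}else{msg[0]=tkn_dny;}
  /* Answer the requesting rank recorded in the receive status */
  MPI_Send(msg,1,MPI_INT,stt.MPI_SOURCE,tag_tkn_rsp,cmm);
} /* end tkn_arb() */
#endif /* !0 */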
|
GMS_hw_metrics_time_series_analysis.h
|
#ifndef __GMS_HW_METRICS_TIME_SERIES_ANALYSIS_H__
#define __GMS_HW_METRICS_TIME_SERIES_ANALYSIS_H__
#include <cstdint>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <immintrin.h> // _mm_malloc/_mm_free
#include <algorithm> // std::sort used below
#include <omp.h>
#include "Timsac_iface.h"
#include "GMS_descriptive_statistics.hpp"
#include "GMS_convert_numeric_data_types.hpp"
#if !defined(DESCRIPTIVE_STATISTICS_DATA)
#define DESCRIPTIVE_STATISTICS_DATA \
        float * __restrict a     = NULL;  \
        float * __restrict df32  = NULL;  \
float w = 0.0f; \
float pw = 0.0f; \
int32_t ifault = -1; \
float srsd = 0.0f; \
float svar = 0.0f; \
float skew = 0.0f; \
float kurt = 0.0f; \
float autocor = 0.0f; \
float xmid = 0.0f; \
float xmean = 0.0f; \
float xmidm = 0.0f; \
float xmed = 0.0f; \
float smin = 0.0f; \
float smax = 0.0f; \
float xrange = 0.0f; \
float xsd = 0.0f; \
float xrelsd = 0.0f; \
float xvar = 0.0f;
/*
Apply Time-Series analysis (Timsac) subroutine "CANARM".
The data itself is invariant from the point of view of
specific subroutine i.e. "CANARM".
Attempt to calculate the descriptive statistics if the result
of the Shapiro-Wilk normality test allows it.
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len, int32_t lagh>
void
hw_perf_metrics_canarm(const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 100000, "Input data length can not exceed -- **100000** elements!!");
//const int32_t lagh = (int32_t)(std::sqrtf((int32_t)len));
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
const std::size_t lag2len = (std::size_t)(lagh*lagh);
const std::size_t lag3len = (std::size_t)lag2len*len;
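// w_limit below is the conventional 5% significance level: descriptive statistics are computed only when the Shapiro-Wilk p-value 'pw' exceeds it (see the pw>w_limit gate further down)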
constexpr float w_limit = 0.05f;
__attribute__((aligned(64))) double acor[lagh] = {};
__attribute__((aligned(64))) double acov[lagh] = {};
__attribute__((aligned(64))) double xarcoef[lagh] = {};
__attribute__((aligned(64))) double xv[lagh] = {};
__attribute__((aligned(64))) double xaic[lagh] = {};
__attribute__((aligned(64))) double xparcor[lagh] = {};
__attribute__((aligned(64))) double xdicm[lagh] = {};
__attribute__((aligned(64))) double xb[lagh] = {};
__attribute__((aligned(64))) double xa[lagh] = {};
__attribute__((aligned(64))) int32_t xm1[lagh] = {};
__attribute__((aligned(64))) int32_t xm2[lagh] = {};
__attribute__((aligned(64))) int32_t xpo[lagh] = {};
double * __restrict xw = NULL;
double * __restrict xz = NULL;
double * __restrict xRs = NULL;
double * __restrict xchi = NULL;
int32_t * __restrict xndt = NULL;
double * __restrict xdic = NULL;
FILE * fptr = NULL;
double xoaic = 0.0;
double xmean = 0.0;
int32_t xmo = 0;
int32_t xnc = 0;
int32_t xk = 0;
int32_t xl = 0;
DESCRIPTIVE_STATISTICS_DATA
const bool init = false; // swilk init argument.
// OpenMP multithreaded calls to _mm_malloc (using parallel sections)
// Multithreaded allocation for large dynamic arrays.
if(len > 10000) {
#pragma omp parallel sections
{
#pragma omp section
{
xw = reinterpret_cast<double*>(_mm_malloc(lag3len*sizeof(double),64));
}
#pragma omp section
{
xz = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
}
#pragma omp section
{
xRs = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
}
#pragma omp section
{
xchi = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
}
#pragma omp section
{
xndt = reinterpret_cast<int32_t*>(_mm_malloc(lag2len*sizeof(int32_t),64));
}
#pragma omp section
{
xdic = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
}
#pragma omp section
{
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
}
#pragma omp section
{
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
}
}
// Single thread checks the returned pointers!!
const bool isnull = (NULL==a) || (NULL==xdic) || (NULL==xndt) ||
(NULL==xchi) || (NULL==xRs) || (NULL==xz) ||
(NULL==xw) || (NULL==df32);
if(__builtin_expect(isnull,0)) {MALLOC_FAILED}
}
else { // len <= 10000: serial allocation path
xw = reinterpret_cast<double*>(_mm_malloc(lag3len*sizeof(double),64));
if(__builtin_expect(NULL==xw,0)) {MALLOC_FAILED}
xz = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
if(__builtin_expect(NULL==xz,0)) {MALLOC_FAILED}
xRs = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
if(__builtin_expect(NULL==xRs,0)) {MALLOC_FAILED}
xchi = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
if(__builtin_expect(NULL==xchi,0)) {MALLOC_FAILED}
xndt = reinterpret_cast<int32_t*>(_mm_malloc(lag2len*sizeof(int32_t),64));
if(__builtin_expect(NULL==xndt,0)) {MALLOC_FAILED}
xdic = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
if(__builtin_expect(NULL==xdic,0)) {MALLOC_FAILED}
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
}
autcorf_(&data[0],&len,&acov[0],&acor[0],&lagh,&xmean);
canarmf_(&len,&lagh,&acov[0],&xarcoef[0],&lagh,&xv[0],&xaic[0],&xoaic[0],
&xmo,&xparcor[0],&xnc,&xm1[0],&xm2[0],&xw[0],&xz[0],&xRs[0],
&xchi[0],&xndt[0],&xdic[0],&xdicm[0],&xpo[0],&xk,&xb[0],&xl,
&xa[0],&lagh,&lagh);
fptr = fopen(fname,"a+");
if(NULL==fptr) {
printf("File open error: %s\n",fname1);
std::exit(EXIT_FAILURE);
}
fprintf(fptr,"HW Metric name: %s\n",metric_name);
fprintf(fptr,"mo=%.16f, oaic=%.16f, nc=%.16f, k=%.16f, l=%.16f\n",xmo,xoaic,xnc,xk,xl);
fprintf(fptr, "arcoef, v, aic, parcor, dicm, b, a, m1, m2, po\n");
for(int32_t i = 0; i != lagh; ++i) {fprintf(fptr,"%.16f %.16f %.16f %.16f %.16f %.16f %.16f %d %d %d\n",
xarcoef[i],xv[i],xaic[i],xparcor[i],xdicm[i],xb[i],xa[i],xm1[i],xm2[i],xpo[i]);}
fprintf(fptr,"w\n");
for(int32_t i = 0; i != (int32_t)lag3len; ++i) {fprintf(fptr,"%.16f\n",xw[i]);}
fprintf(fptr, "z, Rs, chi, ndt, dic\n");
for(int32_t i = 0; i != (int32_t)lag2len; ++i) {fprintf(fptr, " %.16f %.16f %.16f %d %.16f\n",
xz[i],xRs[i],xchi[i],xndt[i],xdic[i]);}
fprintf(fptr, "End of CANARMF results dump\n");
// Sort the sample array in ascending order
//std::sort(data,data+len);
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
printf("Calling Shapiro-Wilk normality test subroutine!!\n");
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fptr,"Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",w,pw);
if(pw<w_limit) fprintf(fptr,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fptr,"Descriptive Statistics calculations!!\n");
fprintf(fptr,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fptr,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fptr,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fptr,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fptr,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fptr,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fptr,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fptr,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fptr,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fptr);
_mm_free(df32);
_mm_free(a);
_mm_free(xdic);
_mm_free(xndt);
_mm_free(xchi);
_mm_free(xRs);
_mm_free(xz);
_mm_free(xw);
}
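/*
   Usage sketch (illustration only, hence the #if 0 guard): the series length
   and the lag count are non-type template arguments, so local work arrays
   such as acor[lagh] receive static extents at compile time. The buffer
   contents and the file/metric names below are placeholders.
*/
#if 0
alignas(64) static double cycles[4096]; // would be filled by a HW-counter sampler
void demo_canarm() {
hw_perf_metrics_canarm<4096,64>(cycles,"canarm_dump.txt","cpu-cycles");
}
#endif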
/*
Apply Time-Series analysis (Timsac) subroutine "MULCOR".
The data itself is invariant from the point of view of
specific subroutine i.e. "MULCOR".
No descriptive statistics computations for this function.
*/
#include <string>
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t ndim, int32_t ldim, int32_t lagh>
void
hw_perf_metrics_mulcor(const double * __restrict __attribute__((aligned(64))) mvdata, //multivariable data
const char * __restrict fname,
const std::string * __restrict metrics){
static_assert(ndim <= 11, "Number of dimensions can not exceed 11!!");
static_assert(ldim <= 100000, "Number of elements per dimension can not exceed 100000!!");
//const int32_t lagh = (int32_t)(2.0f*std::sqrt((float)ldim));
const int32_t totlen = ndim*ldim;
const std::size_t mvd_len = (std::size_t)(lagh*ndim*ndim);
__attribute__((aligned(64))) double xmean[ndim+6];
double * __restrict xcov = NULL;
double * __restrict xcor = NULL;
FILE * fp = NULL;
xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xcov,0)) {MALLOC_FAILED}
xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xcor,0)) {MALLOC_FAILED}
// Call TIMSAC MULCORF subroutine
mulcorf_(&mvdata[0],&totlen,&ndim,&lagh,&xmean[0],&xcov[0],&xcor[0]);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
for(int32_t i = 0; i != ndim; ++i) {fprintf(fp,"HW Metrics: %s\n",metrics[i].c_str());}
fprintf(fp," HW Metrics multivariate mean\n");
for(int32_t i = 0; i != ndim; ++i) { fprintf(fp,"%.16f\n",xmean[i]);}
fprintf(fp," HW Metrics Multivariate Correlation and Covariance\n");
for(int32_t i = 0; i != lagh*ndim*ndim; ++i) {fprintf(fp,"%.16f %.16f\n",xcor[i],xcov[i]);}
fclose(fp);
_mm_free(xcor); _mm_free(xcov);
}
/*
Apply Time-Series analysis (Timsac) subroutine "MULSPE".
The data itself is invariant from the point of view of
specific subroutine i.e. "MULSPE".
No descriptive statistics computations for this function.
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t ndim, int32_t ldim, int32_t lagh>
void
hw_perf_metrics_mulspe(const double * __restrict __attribute__((aligned(64))) mvdata, // Multidimensional data
const char * __restrict fname,
const std::string * __restrict metrics) {
static_assert(ndim <= 11, "Number of dimensions can not exceed 11!!");
static_assert(ldim <= 100000, "Number of elements per dimension can not exceed 100000!!");
//const int32_t lagh = (int32_t)(2.0f*std::sqrt((float)ldim));
const std::size_t mvd_len = (std::size_t)(lagh*ndim*ndim);
const int32_t totlen = ndim*ldim;
__attribute__((aligned(64))) double xmean[ndim+6];
__attribute__((aligned(64))) double xstat[ndim];
// MULCOR data
double * __restrict xcov = NULL;
double * __restrict xcor = NULL;
// MULSPE data
double * __restrict xspec1 = NULL;
double * __restrict xspec2 = NULL;
double * __restrict xcoh1 = NULL;
double * __restrict xcoh2 = NULL;
FILE * fp = NULL;
if(__builtin_expect(mvd_len > 11000ULL,1)) {
#pragma omp parallel sections
{
#pragma omp section
{
xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
}
#pragma omp section
{
xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
}
#pragma omp section
{
xspec1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
}
#pragma omp section
{
xspec2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
}
#pragma omp section
{
xcoh1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
}
#pragma omp section
{
xcoh2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
}
}
//Single thread (main) checks for null pointers.
const bool isnull = (NULL==xcov) || (NULL==xcor) || (NULL==xspec1) ||
(NULL==xspec2) || (NULL==xcoh1) || (NULL==xcoh2);
if(__builtin_expect(isnull,0)) {MALLOC_FAILED}
}
else {
xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xcov,0)) {MALLOC_FAILED}
xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xcor,0)) {MALLOC_FAILED}
xspec1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xspec1,0)) {MALLOC_FAILED}
xspec2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xspec2,0)) {MALLOC_FAILED}
xcoh1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xcoh1,0)) {MALLOC_FAILED}
xcoh2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
if(__builtin_expect(NULL==xcoh2,0)) {MALLOC_FAILED}
}
// Call MULCORF subroutine
mulcorf_(&mvdata[0],&totlen,&ndim,&lagh,&xmean[0],&xcov[0],&xcor[0]);
// Call MULSPE subroutine
mulspef_(&totlen,&ndim,&lagh,&lagh,&xcov[0],&xspec1[0],&xspec2[0],
&xstat[0],&xcoh1[0],&xcoh2[0]);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
for(int32_t i = 0; i != ndim; ++i) {fprintf(fp,"HW Metrics: %s\n",metrics[i].c_str());
fprintf(fp, "Spectrum real part, imaginary part\n");
for(int32_t i = 0; i != (int32_t)(mvd_len); ++i) { fprintf(fp,"%.16f : %.16f\n",xspec1[i],xspec2[i]);}
fprintf(fp, "Test Statistics\n");
for(int32_t i = 0; i != ndim; ++i) { fprintf(fp, "%.16f\n", xstat[i]);}
fprintf(fp, "Simple coherence1, coherence2 \n");
for(int32_t i = 0; i != (int32_t)(mvd_len); ++i) {fprintf(fp,"%.16f , %.16f\n",xcoh1[i],xcoh2[i]);}
fclose(fp);
_mm_free(xcoh2); _mm_free(xcoh1);
_mm_free(xspec2); _mm_free(xspec1);
_mm_free(xcor); _mm_free(xcov);
}
/*
Apply Time-Series analysis (Timsac) subroutine "UNIMAR".
The data itself is invariant from the point of view of
specific subroutine i.e. "UNIMAR".
Attempt to calculate the descriptive statistics if the result
of the Shapiro-Wilk normality test allows it.
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len, int32_t lagh>
void
hw_perf_metrics_unimar(const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
constexpr float w_limit = 0.05f;
__attribute__((aligned(64))) double xv[lagh+1];
__attribute__((aligned(64))) double xaic[lagh+1];
__attribute__((aligned(64))) double xdaic[lagh+1];
__attribute__((aligned(64))) double xa[lagh];
double xmean = 0.0;
double xvar = 0.0;
double xaicm = 0.0;
double xvm = 0.0;
int32_t xm = 0;
char pad[4];
FILE * fp = NULL;
DESCRIPTIVE_STATISTICS_DATA
const bool init = false; // swilk init argument.
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
unimarf_(&data[0],&len,&lagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0],
&xm,&xaicm,&xvm,&xa[0]);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
fprintf(fp,"HW Metric: %s, Method: Univariate Autoregressive AR Model Fitting\n",metric_name);
fprintf(fp,"\nmean=%.16f,var=%.16f,aicm=%.16f,vm=%.16f,xm=%d\n", xmean,
xvar,xaicm,xvm,xm);
fprintf(fp," V, AIC, DAIC\n");
for(int32_t i = 0; i != lagh+1; ++i) {fprintf(fp," %.16f %.16f %.16f\n",xv[i],xaic[i],xdaic[i]);}
fprintf(fp, "A\n");
for(int32_t i = 0; i != lagh; ++i) {fprintf(fp," %.16f\n",xa[i]);}
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",metric_name,w,pw);
if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fp,"Descriptive Statistics calculations!!\n");
fprintf(fp,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fp,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fp,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fp,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fp,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fp);
_mm_free(df32); _mm_free(a);
}
/*
Apply Time-Series analysis (Timsac) subroutine "UNIBAR".
The data itself is invariant from the point of view of
specific subroutine i.e. "UNIBAR".
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len,int32_t lagh>
void
hw_perf_metrics_unibar(const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
constexpr float w_limit = 0.05f;
__attribute__((aligned(64))) double xv[lagh+1];
__attribute__((aligned(64))) double xaic[lagh+1];
__attribute__((aligned(64))) double xdaic[lagh+1];
__attribute__((aligned(64))) double xpa[lagh];
__attribute__((aligned(64))) double xbw[lagh+1];
__attribute__((aligned(64))) double xsbw[lagh];
__attribute__((aligned(64))) double xpab[lagh];
__attribute__((aligned(64))) double xa[lagh];
__attribute__((aligned(64))) double xpxx[128];
double xmean = 0.0;
double xvar = 0.0;
double xaicm = 0.0;
double xvm = 0.0;
double xaicb = 0.0;
double xvb = 0.0;
double xpn = 0.0;
int32_t xm = 0;
char pad[4];
FILE * fp = NULL;
DESCRIPTIVE_STATISTICS_DATA
const bool init = false; // swilk init argument.
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
unibarf_(&data[0],&len,&lagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0],
&xm,&xaicm,&xvm,&xpa[0],&xbw[0],&xsbw[0],&xpab[0],&xaicb,
&xvb,&xpn,&xa[0],&xpxx[0]);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
fprintf(fp," Metric: %s, Method: Univariate Bayesian Method of AR Model Fitting\n");
fprintf(fp,"\nxmean=%.16f,xvar=%.16f,xaicm=%.16f,xvm=%.16f,xaicb=%.16f,xvb=%.16f,xpn=%.16f,xm=%d\n",xmean,
xvar,xaicm,xvm,xaicb,xvb,xpn,xm);
fprintf(fp," V, AIC, DAIC, BW\n");
for(int32_t i = 0; i != (lagh+1); ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n",xv[i],xaic[i],xdaic[i],xbw[i]);}
fprintf(fp, " PA, SBW, PAB, A\n");
for(int32_t i = 0; i != lagh; ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n", xpa[i],xsbw[i],xpab[i],xa[i]);}
fprintf(fp, " PXX\n");
for(int32_t i = 0; i != 128; ++i) {fprintf(fp, "%.16f\n",xpxx[i]);}
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",metric_name,w,pw);
if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fp,"Descriptive Statistics calculations!!\n");
fprintf(fp,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fp,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fp,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fp,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fp,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fp);
_mm_free(df32); _mm_free(a);
}
/*
Apply Time-Series analysis (Timsac) subroutine "EXSAR".
The data itself is invariant from the point of view of
specific subroutine i.e. "EXSAR".
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len,int32_t lagh>
void
hw_perf_metrics_exsar( const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
constexpr float w_limit = 0.05f;
__attribute__((aligned(64))) double xv[lagh+1];
__attribute__((aligned(64))) double xaic[lagh+1];
__attribute__((aligned(64))) double xdaic[lagh+1];
__attribute__((aligned(64))) double xa1[lagh];
__attribute__((aligned(64))) double xa2[lagh];
double xmean = 0.0;
double xvar = 0.0;
double xaicm = 0.0;
double xsdm1 = 0.0;
double xsdm2 = 0.0;
char pad1[4];
int32_t xier = 0;
int32_t xm = 0;
char pad2[4];
FILE * fp = NULL;
DESCRIPTIVE_STATISTICS_DATA
const bool init = false; // swilk init argument.
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
exsarf_(&data[0],&len,&lagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0],
&xm,&xaicm,&xsdm1,&xa1[0],&xsdm2,&xa2[0],&xier);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
fprintf(fp,"HW Metric: %s, Maximum Likelihood Estimation\n", metric_name);
fprintf(fp,"xmean=%.16f,xvar=%.16f,xaicm=%.16f,xsdm1=%.16f,xsdm2=%.16f,xier=%d,xm=%d\n",
xmean,xvar,xaicm,xsdm1,xsdm2,xier,xm);
fprintf(fp,"V, AIC, DAIC \n");
for(int32_t i = 0; i != lagh+1; ++i) {fprintf(fp," %.16f %.16f %.16f\n", xv[i],xaic[i],xdaic[i]);}
fprintf(fp," A1, A2 \n");
for(int32_t i = 0; i != lagh; ++i) {fprintf(fp, " %.16f %.16f\n", xa1[i],xa2[i]);}
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",metric_name,w,pw);
if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fp,"Descriptive Statistics calculations!!\n");
fprintf(fp,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fp,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fp,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fp,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fp,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fp);
_mm_free(df32); _mm_free(a);
}
/*
Apply Time-Series analysis (Timsac) subroutine "BISPEC".
The data itself is invariant from the point of view of
specific subroutine i.e. "BISPEC".
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len,int32_t lagh>
void
hw_perf_metrics_bispec(const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
const int32_t lg12x = lagh*lagh+7;
const std::size_t lagh_len = static_cast<std::size_t>(lg12x);
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
constexpr float w_limit = 0.05f;
const bool init = false; // swilk init argument.
__attribute__((aligned(64))) double acor[lagh+7];
__attribute__((aligned(64))) double acov[lagh+7];
__attribute__((aligned(64))) double pspec1[lagh+7];
__attribute__((aligned(64))) double pspec2[lagh+7];
__attribute__((aligned(64))) double sig[lagh+7];
double * __restrict mnt = NULL;
double * __restrict ch = NULL;
double * __restrict br = NULL;
double * __restrict bi = NULL;
FILE * fp = NULL;
double xmean = 0.0;
double xrat = 0.0; // BISPECF result
DESCRIPTIVE_STATISTICS_DATA
if(__builtin_expect(lg12x >= 100000,1)) {
#pragma omp parallel sections
{
#pragma omp section
{
mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
}
#pragma omp section
{
ch = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
}
#pragma omp section
{
br = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
}
#pragma omp section
{
bi = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
}
#pragma omp section
{
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
}
#pragma omp section
{
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
}
}
const bool isnull = (NULL==mnt) || (NULL==ch) ||
(NULL==br) || (NULL==bi) ||
(NULL==a) || (NULL==df32);
if(__builtin_expect(isnull,0)) {MALLOC_FAILED}
}
else {
mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
if(__builtin_expect(NULL==mnt,0)) {MALLOC_FAILED}
ch = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
if(__builtin_expect(NULL==ch,0)) {MALLOC_FAILED}
br = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
if(__builtin_expect(NULL==br,0)) {MALLOC_FAILED}
bi = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
if(__builtin_expect(NULL==bi,0)) {MALLOC_FAILED}
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
}
thirmof_(&len,&lagh,&data[0],&xmean,&acov[0],&acor[0],&mnt[0]);
bispecf_(&len,&lagh,&data[0],&mnt[0],&pspec1[0],&pspec2[0],
&sig[0],&br[0],&bi[0],&xrat);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
fprintf(fp,"HW Metric: %s, Bi-Spectrum Decomposition\n",metric_name);
fprintf(fp,"xrat=%.16f\n",xrat);
fprintf(fp," %s -- Smoothed Power Spectrum-1, Power Spectrum-2 and Significance\n", metric_name);
for(int32_t i = 0; i != lagh; ++i) { fprintf(fp, "%.16f %.16f %.16f\n", pspec1[i],pspec2[i],sig[i]);}
fprintf(fp, " %S -- Coherence, Real part, Imaginary part\n");
for(int32_t i = 0; i != lg12x; ++i) { fprintf(fp, "%.16f %.16f %.16f\n",ch[i],br[i],bi[i]);}
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",metric_name,w,pw);
if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fp,"Descriptive Statistics calculations!!\n");
fprintf(fp,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fp,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fp,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fp,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fp,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fp);
_mm_free(bi); _mm_free(br);
_mm_free(ch); _mm_free(mnt);
_mm_free(df32); _mm_free(a);
}
/*
Apply Time-Series analysis (Timsac) subroutine "THIRMO".
The data itself is invariant from the point of view of
specific subroutine i.e. "THIRMO".
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len,int32_t lagh>
void
hw_perf_metrics_thirmo(const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
const int32_t lg12x = lagh*lagh+7;
const std::size_t lagh_len = static_cast<std::size_t>(lg12x);
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
constexpr float w_limit = 0.05f;
const bool init = false; // swilk init argument.
__attribute__((aligned(64))) double acor[lagh+7];
__attribute__((aligned(64))) double acov[lagh+7];
double * __restrict mnt = NULL;
FILE * fp = NULL;
double xmean = 0.0;
DESCRIPTIVE_STATISTICS_DATA
mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
if(__builtin_expect(NULL==mnt,0)) {MALLOC_FAILED}
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
thirmof_(&len,&lagh,&data[0],&xmean,&acov[0],&acor[0],&mnt[0]);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
fprintf(fp,"HW Metric: %s Third Moments\n",metric_name);
fprintf(fp,"xmean=%.16f\n",xmean);
fprintf(fp,"ACOV, ACOR\n");
for(int32_t i = 0; i != lagh; ++i) { fprintf(fp, "%.16f %.16f\n", acov[i],acor[i]);}
fprintf(fp," %S -- Third Moment\n",metric_name);
for(int32_t i = 0; i != lg12x; ++i) { fprintf(fp, "%.16f\n",mnt[i]);}
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",metric_name,w,pw);
if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fp,"Descriptive Statistics calculations!!\n");
fprintf(fp,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fp,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fp,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fp,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fp,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fp);
_mm_free(mnt);
_mm_free(df32); _mm_free(a);
}
/*
Apply Time-Series analysis (Timsac) subroutine "AUTOCOR".
The data itself is invariant from the point of view of
specific subroutine i.e. "AUTOCOR".
*/
__attribute__((hot))
__attribute__((aligned(32)))
template<int32_t len,int32_t lagh>
void
hw_perf_metrics_autocor(const double * __restrict __attribute__((aligned(64))) data,
const char * __restrict fname,
const char * __restrict metric_name) {
static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
constexpr float w_limit = 0.05f;
const bool init = false; // swilk init argument.
__attribute__((aligned(64))) double acor[lagh+8];
__attribute__((aligned(64))) double acov[lagh+8];
double xmean = 0.0;
FILE * fp = NULL;
DESCRIPTIVE_STATISTICS_DATA
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
autocorf_(&data[0],&len,&acov[0],&acor[0],&lagh,&xmean);
fp = fopen(fname,"a+");
if(NULL==fp) {
printf("File open error: %s\n",fname);
std::exit(EXIT_FAILURE);
}
fprintf(fp,"HW Metric: %s\n",metric_name);
fprintf(fp,"xmean=%.16f\n",xmean);
fprintf(fp," Series Autocorrelation and Autocovariance.\n");
for(int32_t i = 0; i != lagh; ++i) {fprintf(fp,"%.16f %.16f\n",acor[i],acov[i]);}
cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
std::sort(df32,df32+len);
swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",metric_name,w,pw);
if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
if(pw>w_limit) {
fprintf(fp,"Descriptive Statistics calculations!!\n");
fprintf(fp,"====================================================\n");
srsd = relsd(&df32[0],len);
fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
svar = var(&df32[0],len);
fprintf(fp,"Sample Variance: %.9f\n",svar);
skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
autocor = autoco(&df32[0],len);
fprintf(fp,"Autocorrelation: %.9f\n",autocor);
loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed);
fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
xmid,xmean,xmidm,xmed);
smin = sample_min(&df32[0],len);
fprintf(fp,"Sample Min: %.9f\n",smin);
smax = sample_max(&df32[0],len);
fprintf(fp,"Sample Max: %.9f\n",smax);
scale(&df32[0],len,xrange,xsd,xrelsd,xvar);
fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
xrange,xsd,xrelsd,xvar);
}
fclose(fp);
_mm_free(df32); _mm_free(a);
}
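/*
   Note on the wrappers above: each follows the same post-processing gate --
   convert the series to float, sort it ascending (swilk expects an ordered
   sample plus a coefficient array of length len/2), run the Shapiro-Wilk
   test, and compute descriptive statistics only when the p-value 'pw'
   exceeds w_limit (a nonzero ifault is reported but not treated as fatal).
*/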
#endif /*__GMS_HW_METRICS_TIME_SERIES_ANALYSIS_H__*/
|
TriMesh.h
|
#ifndef TRIMESH_H
#define TRIMESH_H
/*
Szymon Rusinkiewicz
Princeton University
TriMesh.h
Class for triangle meshes.
*/
#define LARGENUM 10000000.0
#define ONE 1
#define CURVATURE 2
#define NOISE 3
#define EPS 1e-6
//#define SPEEDTYPE ONE
#include "Vec.h"
#include "Color.h"
#include "KDtree.h"
#include "math.h"
#include <vector>
#include <list>
#include <map>
#include <limits>
#include <iostream>
#include <fstream>
// SHIREEN
#include <iterator> // -- PM
#include <vnl/vnl_math.h>
#include <vnl/vnl_sparse_matrix.h>
#include <vnl/algo/vnl_svd.h>
#include <vnl/algo/vnl_sparse_lu.h>
#include <vcl_legacy_aliases.h>
//#define SHOW_WARNING 1
#define NUM_THREADS 8
#ifdef MP_USE_OPENMP
#include <omp.h> // -- PM
#endif
// end SHIREEN
// Praful
#include <vgl/algo/vgl_homg_operators_2d.h>
#include <vgl/vgl_conic.h>
#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>
#include <vnl/algo/vnl_matrix_inverse.h>
#include <string>
#include <fstream>
#include <cstdlib>
#include <vcl_compiler.h>
// Praful end
// itk files to generate Face Index -- PM
#include "itkImage.h"
#include "itkImageRegionConstIteratorWithIndex.h"
#include "itkImageRegionIteratorWithIndex.h"
#include "itkImageFileWriter.h"
#include "itkTimeProbe.h"
#include "itkResampleImageFilter.h"
#include "itkIdentityTransform.h"
#include "itkLinearInterpolateImageFunction.h"
#include "itkBSplineInterpolateImageFunction.h"
// sets for the face index set
#include <set>
// Danny Perry's functor
#include "map.h"
///* Prateep */
//// include alglib headers
//#include "alglib/ap.h"
//#include "alglibinternal.h"
//#include "alglibmisc.h"
//#include "solvers.h"
//#include "optimization.h"
//#include "interpolation.h"
typedef float PixelType;
//using std::set; // -- PM
// end SHIREEN
using std::vector;
using std::map;
// SHIREEN
#include <algorithm>
#define PI 3.141592653589793
#ifndef MIN
#define MIN(a,b) (((a)<(b))?(a):(b))
#endif
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif
static int ITER_BLOCK = 0;
/*Prateep */
template<class TIn, class TOut, class Mesh>
struct MapFunctor
{
typedef typename TIn::ConstPointer TInP;
typedef typename TOut::Pointer TOutP;
typedef typename TOut::RegionType OutRegion;
typedef typename TIn::PixelType TPix;
// Store size, origin and spacing of super-voxel (Filled in getFaceIndexMap)
float supVoxelOrigin[3];
float supVoxelSpacing[3];
int supVoxelSize[3];
// super voxel face list
map<int, vector<int> > superVoxelFaceList;
Mesh mesh;
TOutP out_;
MapFunctor(TOutP out) : out_(out) {}
void operator()(const TInP &in, const OutRegion & threadRegion)
{
ITER_BLOCK = ITER_BLOCK + 1;
std::cout << "Iteration : " << ITER_BLOCK << std::endl;
typedef itk::ImageRegionConstIteratorWithIndex<TIn> It;
It itI(in, threadRegion);
for(itI.GoToBegin(); !itI.IsAtEnd(); ++itI) {
if(itI.Get() == 1)
{
point tmPoint;
typename TIn::PointType itkPoint;
in->TransformIndexToPhysicalPoint(itI.GetIndex(), itkPoint);
for(int i = 0; i < 3; i++) { tmPoint[i] = itkPoint[i]; }
// Get nearest k vertices
vector<int> adjFaces; adjFaces.clear();
vector<int>::iterator adjFacesIt;
// find triangles enclosed inside each supervoxel
int tmpInd = mesh.physicalPointToLinearIndex(tmPoint, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
for(vector<int>::iterator it = superVoxelFaceList[tmpInd].begin(); it != superVoxelFaceList[tmpInd].end(); it++) {
adjFaces.push_back((*it));
}
// std::cout << "Number of neighbors : " << adjFaces.size() << std::endl;
if(adjFaces.empty() )
{
// We can either abort here or ignore the voxel
// typename TIn::IndexType ind = itI.GetIndex();
// std::cout << "-1 : " << ind[0] << ' ' << ind[1] << ' ' << ind[2] << std::endl;
out_->SetPixel(itI.GetIndex(), -1);
} else {
//std::cout << "Adjacent faces : " << this->adjacentfaces[imatch].size() << std::endl;
double minDist = LARGENUM;
int fid = -1;
for(adjFacesIt = adjFaces.begin(); adjFacesIt != adjFaces.end(); adjFacesIt++) {
point projPoint;
double dist = mesh.pointTriangleDistance(tmPoint, mesh.faces[*(adjFacesIt)], projPoint);
if(dist + EPS <= minDist) {
minDist = dist;
fid = *(adjFacesIt);
}
}
out_->SetPixel(itI.GetIndex(), fid);
adjFaces.clear();
}
} else {
// typename TIn::IndexType ind = itI.GetIndex();
// std::cout << "-1 : " << ind[0] << ' ' << ind[1] << ' ' << ind[2] << std::endl;
out_->SetPixel(itI.GetIndex(), -1);
}
}
}
};
/* Prateep */
template<class TIn, class TOut, class Mesh>
struct MapFunctorKDtree
{
typedef typename TIn::ConstPointer TInP;
typedef typename TOut::Pointer TOutP;
typedef typename TOut::RegionType OutRegion;
typedef typename TIn::PixelType TPix;
Mesh mesh;
KDtree *kd;
TOutP out_;
MapFunctorKDtree(TOutP out) : out_(out) {}
void setKD() {
kd = new KDtree(mesh.vertices);
}
void operator()(const TInP &in, const OutRegion & threadRegion)
{
ITER_BLOCK = ITER_BLOCK + 1;
std::cout << "Iteratio : " << ITER_BLOCK << std::endl;
typedef itk::ImageRegionConstIteratorWithIndex<TIn> It;
It itI(in, threadRegion);
for(itI.GoToBegin(); !itI.IsAtEnd(); ++itI)
{
if(itI.Get() == 1)
{
point tmPoint;
itk::Image<PixelType, 3>::PointType itkPoint;
in->TransformIndexToPhysicalPoint(itI.GetIndex(), itkPoint);
for(int i = 0; i < 3; i++) { tmPoint[i] = itkPoint[i]; }
// Get nearest vertex
const float *match = kd->closest_to_pt( tmPoint, 10.0 * sqr( mesh.getMaximumEdgeLength() ) );
if(!match)
{
out_->SetPixel(itI.GetIndex(), -1);
} else {
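// Recover the vertex index from the returned coordinate pointer: closest_to_pt() points into mesh.vertices, and each point holds 3 floats, so the float offset divided by 3 is the vertex id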
int imatch = (match - (const float*) &(mesh.vertices[0][0])) / 3;
//std::cout << "Adjacent faces : " << mesh.adjacentfaces[imatch].size() << std::endl;
vector<int> adjFaces; adjFaces.clear();
vector<int>::iterator adjFacesIt;
// Check one-ring to get list of adjacent faces
for(size_t f = 0; f < mesh.adjacentfaces[imatch].size(); f++)
{
adjFaces.push_back(mesh.adjacentfaces[imatch][f]);
}
int fid = 0;
double minDist = LARGENUM;
for(adjFacesIt = adjFaces.begin(); adjFacesIt != adjFaces.end(); adjFacesIt++) {
point projPoint;
double dist = mesh.pointTriangleDistance(tmPoint, mesh.faces[*(adjFacesIt)], projPoint);
if(dist + EPS <= minDist) {
minDist = dist;
fid = *(adjFacesIt);
}
}
out_->SetPixel(itI.GetIndex(), fid);
adjFaces.clear();
}
} else {
out_->SetPixel(itI.GetIndex(), -1);
}
}
}
};
class TriMesh {
protected:
static bool read_helper(const char *filename, TriMesh *mesh);
public:
// Types
struct Face {
int v[3];
float speedInv;
float T[3];
vec3 edgeLens; // edge length for 01, 12, 20
Face() {}
Face(const int &v0, const int &v1, const int &v2)
{
v[0] = v0; v[1] = v1; v[2] = v2;
}
Face(const int *v_)
{
v[0] = v_[0]; v[1] = v_[1]; v[2] = v_[2];
}
int &operator[] (int i) { return v[i]; }
const int &operator[] (int i) const { return v[i]; }
operator const int * () const { return &(v[0]); }
operator const int * () { return &(v[0]); }
operator int * () { return &(v[0]); }
int indexof(int v_) const
{
return (v[0] == v_) ? 0 :
(v[1] == v_) ? 1 :
(v[2] == v_) ? 2 : -1;
}
};
class BBox {
public:
point min, max;
bool valid;
// Construct as empty
BBox() : min(point(std::numeric_limits<float>::max(),
std::numeric_limits<float>::max(),
std::numeric_limits<float>::max())),
max(point(-std::numeric_limits<float>::max(), // -max, not ::min(): the smallest positive float would never shrink below zero
-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max())),
valid(false)
{}
// Initialize to one point or two points
BBox(const point &p) : min(p), max(p), valid(true)
{}
BBox(const point &min_, const point &max_) :
min(min_), max(max_), valid(true)
{}
// Mark invalid
void clear()
{
min = point(std::numeric_limits<float>::max(),
std::numeric_limits<float>::max(),
std::numeric_limits<float>::max());
max = point(-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max());
valid = false;
}
// Return center point and (vector) diagonal
point center() const { return 0.5f * (min+max); }
vec size() const { return max - min; }
// Grow a bounding box to encompass a point
BBox &operator += (const point &p)
{ min.min(p); max.max(p); return *this; }
BBox &operator += (const BBox &b)
{ min.min(b.min); max.max(b.max); return *this; }
// The following appear to be necessary for Visual Studio,
// despite the fact that the operators shouldn't need
// to be friends...
friend const TriMesh::BBox operator + (const TriMesh::BBox &b, const point &p);
friend const TriMesh::BBox operator + (const point &p, const TriMesh::BBox &b);
friend const TriMesh::BBox operator + (const TriMesh::BBox &b1, const TriMesh::BBox &b2);
};
/*
struct BBox {
point min, max;
point center() const { return 0.5f * (min+max); }
vec size() const { return max - min; }
bool valid;
BBox() : valid(false)
{}
};
*/
struct BSphere {
point center;
float r;
bool valid;
BSphere() : valid(false)
{}
};
// Enums
enum tstrip_rep { TSTRIP_LENGTH, TSTRIP_TERM };
enum { GRID_INVALID = -1 };
//enum speed_type { ONE = 0, CURVATURE, NOISE };
// The basics: vertices and faces
vector< point > vertices;
vector<Face> faces;
int speedType;
// SHIREEN
// Face Index Map -- PM
typedef int VoxelIndexType;
map<VoxelIndexType, vector<int> > faceIndexMap;
// map<Face, ...> did not work
//map<Face, double > areaInvPerTri;
//map<Face, double > areaPerTri; // shireen
// map<Face,int> faceids;
// std::vector <double> areaPerTri;
// std::vector <double> areaInvPerTri;
// Store the size and index of the image domain (Filled in getFaceIndexMap)
float imageSpacing[3];
float imageOrigin[3];
int imageSize[3];
int imageIndex[3];
int number_of_voxels;
int number_of_subvoxels;
// end SHIREEN
// Triangle strips
vector<int> tstrips;
// Grid, if present
vector<int> grid;
int grid_width, grid_height;
// Other per-vertex properties
vector<Color> colors;
vector<float> confidences;
vector<unsigned> flags;
unsigned flag_curr;
// Computed per-vertex properties
vector<vec> normals;
vector<vec> pdir1, pdir2;
vector<float> curv1, curv2;
vector<float> abs_curv;
vector< Vec<4,float> > dcurv;
vector<vec> cornerareas;
vector<float> pointareas;
KDtree *kd;
double maxEdgeLength;
vector< map<unsigned int, float> > geodesicMap;
float *geodesic;
vector< vector<float> > features;
vector < vector<point> > featureGradients; //Praful - load more accurate gradient on vertices using volume for use in shapeworks
// Bounding structures
BBox bbox;
BSphere bsphere;
// Connectivity structures:
// For each vertex, all neighboring vertices
vector< vector<int> > neighbors;
// For each vertex, all neighboring faces
vector< vector<int> > adjacentfaces;
vector<double> radiusInscribe;
vector<int> getTwoNeighbors(int v){
vector<int> twoNeighbors;
// for each neighbor
for(int i=0; i < this->neighbors[v].size(); i++){
// add self
int n = this->neighbors[v][i];
twoNeighbors.push_back(n);
// add neighbors
for(int j=0; j < this->neighbors[n].size(); j++)
twoNeighbors.push_back( this->neighbors[n][j] );
}
return twoNeighbors;
}
vector< vector<Face> > vertOneringFaces;
// For each face, the three faces attached to its edges
// (for example, across_edge[3][2] is the number of the face
// that's touching the edge opposite vertex 2 of face 3)
vector<Face> across_edge;
vector<float> noiseOnVert;
int getSpeedType(){
return speedType;
}
//int SPEEDTYPE;
// Compute all this stuff...
void setSpeedType(int st)
{
//ST = st;
speedType = st;
if(st == ONE){
//iMap = &geoIndex;
//dMap = &geoMap;
}
else if(st == CURVATURE){
//iMap = &adaptIndex;
//dMap = &adaptMap;
}
else{
std::cout << "Impossible SpeedType set" << std::endl;
throw(1); //exit(1337);
}
}
void need_tstrips();
void convert_strips(tstrip_rep rep);
void unpack_tstrips();
void triangulate_grid();
void need_faces()
{
if (!faces.empty())
return;
if (!tstrips.empty())
unpack_tstrips();
else if (!grid.empty())
triangulate_grid();
}
void need_faceedges();
void need_speed();
void need_noise(int nNoiseIter);
void need_oneringfaces();
void need_kdtree();
void need_maxedgelength();
void need_normals();
void need_pointareas();
void need_curvatures();
void need_abs_curvatures();
void need_dcurv();
void need_bbox();
void need_bsphere();
void need_neighbors();
void need_adjacentfaces();
void need_across_edge();
void need_meshinfo();
void need_Rinscribe();
// Input and output
static TriMesh *read(const char *filename);
bool write(const char *filename);
// Statistics
// XXX - Add stuff here
float feature_size();
// Useful queries
// XXX - Add stuff here
bool is_bdy(int v)
{
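// An interior manifold vertex has as many adjacent faces as neighbors; a boundary vertex has one more neighbor than faces, hence the inequality test below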
if (neighbors.empty()) need_neighbors();
if (adjacentfaces.empty()) need_adjacentfaces();
return neighbors[v].size() != adjacentfaces[v].size();
}
vec trinorm(int f)
{
if (faces.empty()) need_faces();
return ::trinorm(vertices[faces[f][0]], vertices[faces[f][1]],
vertices[faces[f][2]]);
}
// FIM: check angle for at a given vertex, for a given face
bool IsNonObtuse(int v, Face f)
{
int iV = f.indexof(v);
point A = this->vertices[v];
point B = this->vertices[f[(iV+1)%3]];
point C = this->vertices[f[(iV+2)%3]];
float a = dist(B,C);
float b = dist(A,C);
float c = dist(A,B);
float angA = 0.0; /* = acos( (b*b + c*c - a*a) / (2*b*c) )*/
if ((a > 0) && (b > 0) && (c > 0))// Manasi stack overflow
{// Manasi stack overflow
angA = acos( (b*b + c*c - a*a) / (2*b*c) );// Manasi stack overflow
}// Manasi stack overflow
//return ( angA - PI/2.0f < -0.00001 );
return ( angA < M_PI/2.0f );
}
// FIM: given a vertex, find an all-acute neighborhood of faces
void SplitFace(vector<Face> &acFaces, int v, Face cf, int nfAdj/*, int currentVert*/)
{
// get all the four vertices in order
/* v1 v4
+-------+
\ . \
\ . \
\ . \
+-------+
v2 v3 */
int iV = cf.indexof(v); // get index of v in terms of cf
int v1 = v;
int v2 = cf[(iV+1)%3];
int v4 = cf[(iV+2)%3];
iV = this->faces[nfAdj].indexof(v2); // get index of v in terms of adjacent face
int v3 = this->faces[nfAdj][(iV+1)%3];
// create faces (v1,v3,v4) and (v1,v2,v3), check angle at v1
Face f1(v1,v3,v4);
//f1.T[f1.indexof(v1)] = this->vertT[currentVert][v1];
//f1.T[f1.indexof(v3)] = this->vertT[currentVert][v3];
//f1.T[f1.indexof(v4)] = this->vertT[currentVert][v4];
Face f2(v1,v2,v3);
//f2.T[f2.indexof(v1)] = this->vertT[currentVert][v1];
//f2.T[f2.indexof(v2)] = this->vertT[currentVert][v2];
//f2.T[f2.indexof(v3)] = this->vertT[currentVert][v3];
if (IsNonObtuse(v,f1))
{
//switch (SPEEDTYPE)
switch(speedType)
{
case CURVATURE:
/*
f1.speedInv = ( abs(curv1[f1[0]] + curv2[f1[0]]) +
abs(curv1[f1[1]] + curv2[f1[1]]) +
abs(curv1[f1[2]] + curv2[f1[2]]) ) / 6;
*/
f1.speedInv = ( abs_curv[f1[0]] +
abs_curv[f1[1]] +
abs_curv[f1[2]] ) / 3.0;
break;
case ONE:
f1.speedInv = 1.0;
break;
case NOISE:
f1.speedInv = (noiseOnVert[f1[0]] +
noiseOnVert[f1[1]] +
noiseOnVert[f1[2]]) / 3;
break;
default:
f1.speedInv = 1.0;
break;
}
vec3 edge01 = (vec3)(vertices[f1[1]] - vertices[f1[0]]);
vec3 edge12 = (vec3)(vertices[f1[2]] - vertices[f1[1]]);
vec3 edge20 = (vec3)(vertices[f1[0]] - vertices[f1[2]]);
f1.edgeLens[0] =sqrt(edge01[0]*edge01[0] + edge01[1]*edge01[1] + edge01[2]*edge01[2]);
f1.edgeLens[1] =sqrt(edge12[0]*edge12[0] + edge12[1]*edge12[1] + edge12[2]*edge12[2]);
f1.edgeLens[2] =sqrt(edge20[0]*edge20[0] + edge20[1]*edge20[1] + edge20[2]*edge20[2]);
acFaces.push_back(f1);
}
else
{
int nfAdj_new = this->across_edge[nfAdj][this->faces[nfAdj].indexof(v2)];
if (nfAdj_new > -1)
{
SplitFace(acFaces,v,f1,nfAdj_new/*, currentVert*/);
}
else
{
//printf("NO cross edge!!! Maybe a hole!!\n");
}
//SplitFace(acFaces,v,f1,nfAdj_new, currentVert);
}
if (IsNonObtuse(v,f2))
{
//switch (SPEEDTYPE)
switch(speedType)
{
case CURVATURE:
/*
f2.speedInv = ( abs(curv1[f2[0]] + curv2[f2[0]]) +
abs( curv1[f2[1]] + curv2[f2[1]]) +
abs(curv1[f2[2]] + curv2[f2[2]]) ) / 6;
*/
f2.speedInv = ( abs_curv[f2[0]] +
abs_curv[f2[1]] +
abs_curv[f2[2]] ) / 3.0;
break;
case ONE:
f2.speedInv = 1.0;
break;
case NOISE:
f2.speedInv = (noiseOnVert[f2[0]] +
noiseOnVert[f2[1]] +
noiseOnVert[f2[2]]) / 3;
break;
default:
f2.speedInv = 1.0;
break;
}
vec3 edge01 = (vec3)(vertices[f2[1]] - vertices[f2[0]]);
vec3 edge12 = (vec3)(vertices[f2[2]] - vertices[f2[1]]);
vec3 edge20 = (vec3)(vertices[f2[0]] - vertices[f2[2]]);
f2.edgeLens[0] =sqrt(edge01[0]*edge01[0] + edge01[1]*edge01[1] + edge01[2]*edge01[2]);
f2.edgeLens[1] =sqrt(edge12[0]*edge12[0] + edge12[1]*edge12[1] + edge12[2]*edge12[2]);
f2.edgeLens[2] =sqrt(edge20[0]*edge20[0] + edge20[1]*edge20[1] + edge20[2]*edge20[2]);
acFaces.push_back(f2);
}
else
{
int nfAdj_new = this->across_edge[nfAdj][this->faces[nfAdj].indexof(v4)];
if (nfAdj_new > -1)
{
SplitFace(acFaces,v,f2,nfAdj_new/*,currentVert*/);
}
else
{
//printf("NO cross edge!!! Maybe a hole!!\n");
}
//SplitFace(acFaces,v,f2,nfAdj_new,currentVert);
}
}
// FIM: one ring function
vector<Face> GetOneRing(int v/*, int currentVert*/)
{
// make sure we have the across-edge map
if (this->across_edge.empty())
this->need_across_edge();
// variables required
vector<Face> oneRingFaces;
vector<Face> t_faces;
// get adjacent faces
int naf = this->adjacentfaces[v].size();
if (!naf)
{
std::cout << "vertex " << v << " has 0 adjacent faces..." << std::endl;
}
else
{
for (int af = 0; af < naf; af++)
{
Face cf = this->faces[adjacentfaces[v][af]];
t_faces.clear();
if(IsNonObtuse(v,cf))// check angle: if non-obtuse, return existing face
{
//this->colors[cf[0]] = Color::red();
//this->colors[cf[1]] = Color::red();
//this->colors[cf[2]] = Color::red();
t_faces.push_back(cf);
}
else
{
//t_faces.push_back(cf);
int nfae = this->across_edge[this->adjacentfaces[v][af]][cf.indexof(v)];
if (nfae > -1)
{
SplitFace(t_faces,v,cf,nfae/*,currentVert*/);// if obtuse, split face till we get all acute angles
}
else
{
//printf("NO cross edge!!! Maybe a hole!!\n");
}
//SplitFace(t_faces,v,cf,nfae,currentVert);// if obtuse, split face till we get all acute angles
}
for (int tf = 0; tf < t_faces.size(); tf++)
{
//this->colors[t_faces[tf][0]] = Color::red();
//this->colors[t_faces[tf][1]] = Color::red();
//this->colors[t_faces[tf][2]] = Color::red();
oneRingFaces.push_back(t_faces[tf]);
}
}
}
//this->colors[v] = Color::green();
return oneRingFaces;
}
// FIM: initialize attributes
//typedef std::<int> ListType;
void InitializeAttributes(int currentVert , std::vector<int> seeds = vector<int>() )
{
int nv = this->vertices.size();
this->geodesic = new float[nv];
for(int v= 0; v < nv; v++){
geodesic[v] = LARGENUM;
}
// initialize seed points if present...
if (!seeds.empty()){
int ns = seeds.size();
for (int s = 0; s < ns; s++){
//this->vertMap[currentVert][seeds[s]].d = 0;
geodesic[seeds[s]] = 0;
}
}
// pre-compute faces, normals, and other per-vertex properties that may be needed
this->need_neighbors();
this->need_normals();
this->need_adjacentfaces();
this->need_across_edge();
this->need_faces();
/* HOW DO WE DO THIS USING NEW GEODESIC DATA STRUCTURE?
// for all faces: initialize per-vertex travel time and face-speed
int nf = this->faces.size();
for (int f = 0; f < nf; f++)
{
Face cf = this->faces[f];
// travel time
faces[f].T[0] = this->vertT[currentVert][cf[0]];
faces[f].T[1] = this->vertT[currentVert][cf[1]];
faces[f].T[2] = this->vertT[currentVert][cf[2]];
}
*/
}
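// Usage note (sketch): InitializeAttributes and CleanupAttributes must be
// paired, since geodesic[] is allocated with new[] above and released below.
// `mesh` is a hypothetical instance.
// std::vector<int> seeds(1, 0); // seed the geodesic computation at vertex 0
// mesh.InitializeAttributes(0, seeds);
// // ... run the solver, reading/writing mesh.geodesic[v] ...
// mesh.CleanupAttributes(0);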
// FIM: Remove data lingering from computation
void CleanupAttributes(int currentVert)
{
delete [] this->geodesic;
}
/* Prateep */
void WriteFaceIndexMap(const char* outfilename)
{
std::ofstream fout(outfilename, std::ios::out);
map<VoxelIndexType, vector<int> >::iterator faceIndexMapIt;
vector<int>::iterator faceIndexIt;
for(faceIndexMapIt = this->faceIndexMap.begin(); faceIndexMapIt != this->faceIndexMap.end(); faceIndexMapIt++)
{
fout << (int) (*faceIndexMapIt).first << ": ";
for(faceIndexIt = faceIndexMapIt->second.begin(); faceIndexIt != faceIndexMapIt->second.end(); faceIndexIt++) {
fout << (*faceIndexIt) << " ";
}
fout << std::endl;
}
fout.close();
}
/* Prateep */
void ReadFaceIndexMap(const char* infilename)
{
std::ifstream infile(infilename);
if(!infile.is_open())
{
std::cout << "File Not Found:" << infilename << std::endl;
}
else
{
std::cout << "reading face indices from " << infilename << std::endl;
// map<VoxelIndexType, set<int> > tmpFaceIndexMap;
std::string line;
while(infile)
{
getline(infile, line);
std::stringstream ss(line);
VoxelIndexType index;
char delim;
ss >> index >> delim;
int fid;
while(ss >> fid) {
this->faceIndexMap[index].push_back(fid);
// tmpFaceIndexMap[index].insert(fid);
}
}
// if(tmpFaceIndexMap.size() != 0 )
// {
// this->faceIndexMap = tmpFaceIndexMap;
// }
// tmpFaceIndexMap.clear(); // clear memory
infile.close();
}
}
void ClearFaceIndexMap()
{
this->faceIndexMap.clear();
}
/* Prateep */
float getMaximumEdgeLength()
{
float s = 0.0f; // match the float return type
for(unsigned int f = 0; f < this->faces.size(); f++)
{
if(s < this->faces[f].edgeLens[0]) { // 01
s = this->faces[f].edgeLens[0];
}
if(s < this->faces[f].edgeLens[1]) { // 12
s = this->faces[f].edgeLens[1];
}
if(s < this->faces[f].edgeLens[2]) { // 20
s = this->faces[f].edgeLens[2];
}
}
return s;
}
/* Prateep */
void physicalPointToXYZ(point x, VoxelIndexType* imageX) // physical to image coordinates
{
imageX[0] = static_cast<VoxelIndexType> ( (x[0] - this->imageOrigin[0]) / this->imageSpacing[0] );
imageX[1] = static_cast<VoxelIndexType> ( (x[1] - this->imageOrigin[1]) / this->imageSpacing[1] );
imageX[2] = static_cast<VoxelIndexType> ( (x[2] - this->imageOrigin[2]) / this->imageSpacing[2] );
}
void physicalPointToXYZ(point x, VoxelIndexType* imageX, float imageOrigin[3], float imageSpacing[3])
{
imageX[0] = static_cast<VoxelIndexType> ( (x[0] - imageOrigin[0]) / imageSpacing[0] );
imageX[1] = static_cast<VoxelIndexType> ( (x[1] - imageOrigin[1]) / imageSpacing[1] );
imageX[2] = static_cast<VoxelIndexType> ( (x[2] - imageOrigin[2]) / imageSpacing[2] );
}
/* Prateep */
point indexToPhysicalPoint(VoxelIndexType* imageX)
{
point ret;
ret[0] = static_cast<float> ( imageX[0] * this->imageSpacing[0] + this->imageOrigin[0] );
ret[1] = static_cast<float> ( imageX[1] * this->imageSpacing[1] + this->imageOrigin[1] );
ret[2] = static_cast<float> ( imageX[2] * this->imageSpacing[2] + this->imageOrigin[2] );
return ret;
}
/* Prateep */
point indexToPhysicalPoint(VoxelIndexType* imageX, float origin[3], float spacing[3])
{
point ret;
ret[0] = static_cast<float> ( imageX[0] * spacing[0] + origin[0] );
ret[1] = static_cast<float> ( imageX[1] * spacing[1] + origin[1] );
ret[2] = static_cast<float> ( imageX[2] * spacing[2] + origin[2] );
return ret;
}
/* Prateep */
bool isInsideImageBuffer(itk::Image<int,3>::IndexType ind)
{
if(ind[0] >= this->imageIndex[0] && ind[0] < this->imageSize[0] &&
ind[1] >= this->imageIndex[1] && ind[1] < this->imageSize[1] &&
ind[2] >= this->imageIndex[2] && ind[2] < this->imageSize[2]
) {
return true;
}
else {
return false;
}
}
// SHIREEN
VoxelIndexType physicalPointToLinearIndex(point x)
{
VoxelIndexType imageX[3];
this->physicalPointToXYZ(x,imageX,this->imageOrigin, this->imageSpacing);
VoxelIndexType linearIndX = this->indexToLinearIndex(imageX, this->imageSize);
return linearIndX;
}
VoxelIndexType physicalPointToLinearIndex(point x, float imageOrigin[3], float imageSpacing[3], int imageSize[3])
{
VoxelIndexType imageX[3];
this->physicalPointToXYZ(x,imageX,imageOrigin,imageSpacing);
VoxelIndexType linearIndX = this->indexToLinearIndex(imageX, imageSize);
return linearIndX;
}
void linearIndexToXYZ(VoxelIndexType linearIndX, VoxelIndexType* imageX, int imageSize[3])
{
// convert linear index to r, c, s
imageX[2] = linearIndX / (imageSize[0]*imageSize[1]); // slice value (Note: integer division)
linearIndX %= (imageSize[0]*imageSize[1]);
imageX[1] = linearIndX / imageSize[0]; // column value (Note: integer division)
imageX[0] = linearIndX % imageSize[0]; // row value
}
void linearIndexToXYZ(VoxelIndexType linearIndX, VoxelIndexType* imageX)
{
// convert linear index to r, c, s
imageX[2] = linearIndX / (imageSize[0]*imageSize[1]); // slice value (Note: integer division)
linearIndX %= (imageSize[0]*imageSize[1]);
imageX[1] = linearIndX / imageSize[0]; // column value (Note: integer division)
imageX[0] = linearIndX % imageSize[0]; // row value
}
VoxelIndexType indexToLinearIndex(VoxelIndexType* imageX, int imageSize[3])
{
VoxelIndexType linearIndX = imageX[0] + imageX[1] * imageSize[0] + imageX[2] * imageSize[0] * imageSize[1];
return linearIndX;
}
VoxelIndexType indexToLinearIndex(VoxelIndexType* imageX)
{
VoxelIndexType linearIndX = imageX[0] + imageX[1] * imageSize[0] + imageX[2] * imageSize[0] * imageSize[1];
return linearIndX;
}
point linearIndexToPhysicalPoint(VoxelIndexType linearIndX, float imageOrigin[3], float imageSpacing[3], int imageSize[3])
{
VoxelIndexType imageX[3];
this->linearIndexToXYZ(linearIndX, imageX, imageSize);
point p = this->indexToPhysicalPoint(imageX, imageOrigin, imageSpacing);
return p;
}
point linearIndexToPhysicalPoint(VoxelIndexType linearIndX)
{
VoxelIndexType imageX[3];
this->linearIndexToXYZ(linearIndX, imageX, imageSize);
point p = this->indexToPhysicalPoint(imageX, imageOrigin, imageSpacing);
return p;
}
// end SHIREEN
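// Example (sketch): the index helpers above are mutually inverse up to voxel
// truncation, so a physical point round-trips to the corner of its voxel.
// `mesh` and `p` are hypothetical names.
// VoxelIndexType lin = mesh.physicalPointToLinearIndex(p);
// point corner = mesh.linearIndexToPhysicalPoint(lin);
// // fabs(corner[i] - p[i]) < imageSpacing[i] for each axis i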
/* Prateep
* http://www.geometrictools.com/Documentation/DistancePoint3Triangle3.pdf
* ^t
* \ |
* \reg2|
* \ |
* \ |
* \ |
* \|
* *P2
* |\
* | \
* reg3 | \ reg1
* | \
* |reg0\
* | \
* | \ P1
* -------*-------*------->s
* |P0 \
* reg4 | reg5 \ reg6
*/
double pointTriangleDistance(point P, Face face, point& PP)
{
// rewrite vertices in normal form
point B = this->vertices[face.v[0]];
point E0 = this->vertices[face.v[1]] - B;
point E1 = this->vertices[face.v[2]] - B;
point D = B - P;
float a = E0 DOT E0;
float b = E0 DOT E1;
float c = E1 DOT E1;
float d = E0 DOT D;
float e = E1 DOT D;
float f = D DOT D;
float det = a*c - b*b;
float s = b*e - c*d;
float t = b*d - a*e;
float distSqr = 0.0f;
if(s+t <= det) {
if(s < 0) {
if(t < 0) {
// region 4
if(d < 0) {
t = 0;
if(-d >= a) {
s = 1.0;
distSqr = a + 2.0f*d + f;
} else {
s = -d/a;
distSqr = d*s + f;
}
} else {
s = 0.0f;
if(e >= 0.0f) {
t = 0.0f;
distSqr = f;
} else {
if(-e >= c) {
t = 1.0f;
distSqr = c + 2.0f*e + f;
} else {
t = -e/c;
distSqr = e*t + f;
}
}
} // end of region 4
} else {
// region 3
s = 0.0f;
if(e >= 0.0f) {
t = 0.0f;
distSqr = f;
} else {
if(-e >= c) {
t = 1.0f;
distSqr = c + 2.0f*e + f;
} else {
t = -e/c;
distSqr = e*t + f;
}
}
} // end of region 3
} else {
if(t < 0.0f) {
// region 5
t = 0.0f;
if (d >= 0.0f) {
s = 0.0f;
distSqr = f;
} else {
if(-d >= a) {
s = 1.0f;
distSqr = a + 2*d + f;
} else {
s = -d/a;
distSqr = d*s + f;
}
}
// end of region 5
} else {
// region 0
float invDet = 1.0f/det;
s *= invDet;
t *= invDet;
distSqr = s * (a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
// end of region 0
}
}
} else {
if(s < 0.0f) {
// region 2
float tmp0 = b+d;
float tmp1 = c+e;
if(tmp1 > tmp0) {
float numer = tmp1 - tmp0;
float denom = a - 2*b + c;
if(numer >= denom) {
s = 1.0f;
t = 0.0f;
distSqr = a + 2*d + f;
} else {
s = numer / denom;
t = 1.0 - s;
distSqr = s*(a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
}
} else {
s = 0.0f;
if(tmp1 <= 0.0f) {
t = 1.0f;
distSqr = c + 2*e + f;
} else {
if(e >= 0.0f) {
t = 0.0f;
distSqr = f;
} else {
t = -e/c;
distSqr = e*t+f;
}
}
}
// end of region 2
} else {
if(t < 0) {
// region 6
float tmp0 = b + e;
float tmp1 = a + d;
if(tmp1 > tmp0) {
float numer = tmp1 - tmp0;
float denom = a-2*b+c;
if(numer >= denom) {
t = 1.0f;
s = 0.0f;
distSqr = c + 2*e + f;
} else {
t = numer / denom;
s = 1.0 - t;
distSqr = s*(a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
}
} else {
t = 0.0f;
if(tmp1 <= 0.0f) {
s = 1.0f;
distSqr = a + 2*d + f;
} else {
if(d >= 0.0f) {
s = 0.0f;
distSqr = f;
} else {
s = -d/a;
distSqr = d*s + f;
}
}
}
// end of region 6
} else {
// region 1
float numer = c + e - b - d;
if(numer <= 0) {
s = 0.0f;
t = 1.0f;
distSqr = c + 2*e + f;
} else {
float denom = a - 2*b + c;
if(numer >= denom) {
s = 1.0f;
t = 0.0f;
distSqr = a + 2*d + f;
} else {
s = numer / denom;
t = 1.0f-s;
distSqr = s*(a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
}
}
// end of region 1
}
}
}
if (distSqr < 0.0f) distSqr = 0.0f;
float dist = std::sqrt(distSqr);
PP = B + s * E0 + t * E1;
return dist;
}
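// Example (sketch): closest-point query against one face. The return value is
// the Euclidean distance and PP receives the projection B + s*E0 + t*E1.
// `mesh`, `f`, and `query` are hypothetical names.
// point closest;
// double dEuc = mesh.pointTriangleDistance(query, mesh.faces[f], closest);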
/* Prateep */
int pointTriangleRegion(point P, Face face)
{
// rewrite vertices in normal form
point B = this->vertices[face.v[0]];
point E0 = this->vertices[face.v[1]] - B;
point E1 = this->vertices[face.v[2]] - B;
point D = B - P;
float a = E0 DOT E0;
float b = E0 DOT E1;
float c = E1 DOT E1;
float d = E0 DOT D;
float e = E1 DOT D;
float f = D DOT D;
float det = a*c - b*b;
float s = b*e - c*d;
float t = b*d - a*e;
int region;
if(s+t <= det) {
if(s < 0) {
if(t < 0) {
// region 4
region = 4;
// end of region 4
} else {
// region 3
region = 3;
// end of region 3
}
} else if(t < 0.0f) {
// region 5
region = 5;
// end of region 5
} else {
// region 0
region = 0;
// end of region 0
}
} else {
if(s < 0.0f) {
// region 2
region = 2;
// end of region 2
} else {
if(t < 0) {
// region 6
region = 6;
// end of region 6
} else {
// region 1
region = 1;
// end of region 1
}
}
}
return region;
}
/* Prateep */
void generateFaceIndexMapViaKDtree(itk::Image<PixelType, 3>::ConstPointer narrowBand, int number_of_subvoxels = 1,
int num_threads = 1, std::string debug_prefix = "")
{
if( !this->kd ) this->need_kdtree();
this->faceIndexMap.clear();
this->number_of_subvoxels = number_of_subvoxels;
const double eps = 1e-10;
// inline DeepCopy
itk::Image<PixelType, 3>::Pointer OutputImage = itk::Image<PixelType, 3>::New();
OutputImage->SetRegions( narrowBand->GetLargestPossibleRegion() );
OutputImage->Allocate();
OutputImage->SetOrigin( narrowBand->GetOrigin() );
OutputImage->SetSpacing( narrowBand->GetSpacing() );
OutputImage->SetDirection( narrowBand->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > narrowBandIt(narrowBand, narrowBand->GetLargestPossibleRegion());
itk::ImageRegionIterator< itk::Image<PixelType,3> > OutputImageIt(OutputImage, narrowBand->GetLargestPossibleRegion() );
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
while(!narrowBandIt.IsAtEnd() ) {
OutputImageIt.Set( narrowBandIt.Get() );
++narrowBandIt;
++OutputImageIt;
}
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
std::cout << "FidsViaKDTree. Starting functor ...\n";
itk::TimeProbe clock;
clock.Start();
{
typedef MapFunctorKDtree< itk::Image<PixelType,3>, itk::Image<PixelType,3>, TriMesh > FType;
FType functor(OutputImage);
// We need the kdtree, so pass in the vertices, faces and adjacent faces; the rest are just function calls on TriMesh, so other members need not be passed.
functor.mesh = *this;
functor.mesh.faces = this->faces;
functor.mesh.vertices = this->vertices;
functor.mesh.adjacentfaces = this->adjacentfaces;
functor.setKD();
bambam::map< itk::Image<PixelType, 3>, itk::Image<PixelType,3>, FType>::run(narrowBand, functor, num_threads);
}
clock.Stop();
std::cout << "Time taken (functor)\n";
std::cout << "Mean : " << clock.GetMean() << std::endl;
std::cout << "Total : " << clock.GetTotal() << std::endl;
std::cout << "---------------------------\n";
if(debug_prefix.compare("") != 0)
{
itk::ImageFileWriter< itk::Image<PixelType,3> >::Pointer writer = itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
std::string f = debug_prefix + ".faceInd.nrrd";
writer->SetFileName( f.c_str() );
writer->SetInput( OutputImage );
writer->SetUseCompression(true);
writer->Update();
// the distance map itself is saved once below, after the face index map is collected
}
// Collect values in faceIndexMap
itk::Image<PixelType, 3>::IndexType index = OutputImage->GetLargestPossibleRegion().GetIndex();
itk::Image<PixelType, 3>::SizeType size = OutputImage->GetLargestPossibleRegion().GetSize();
itk::Image<PixelType, 3>::PointType origin = OutputImage->GetOrigin();
itk::Image<PixelType, 3>::SpacingType spacing = OutputImage->GetSpacing();
// Store origin of image domain
this->imageOrigin[0] = origin[0];
this->imageOrigin[1] = origin[1];
this->imageOrigin[2] = origin[2];
// Store spacing of image domain of the original DT
this->imageSpacing[0] = spacing[0] * number_of_subvoxels;
this->imageSpacing[1] = spacing[1] * number_of_subvoxels;
this->imageSpacing[2] = spacing[2] * number_of_subvoxels;
// Store size of image domain of the original DT
this->imageSize[0] = size[0] / number_of_subvoxels;
this->imageSize[1] = size[1] / number_of_subvoxels;
this->imageSize[2] = size[2] / number_of_subvoxels;
// collect the results from the subvoxels to the original voxels
for(unsigned int i = 0; i < size[0]; i++) { // X
for(unsigned int j = 0; j < size[1]; j++) { // Y
for(unsigned int k = 0; k < size[2]; k++) { // Z
itk::Image<PixelType, 3>::IndexType idx;
idx[0] = i; idx[1] = j; idx[2] = k;
//if((i==256) && (j==80) &&(k==256) )
// int p=0;
if(OutputImage->GetPixel(idx) > -1)
{
itk::Image<PixelType, 3>::IndexType idx2;
idx2[0] = (VoxelIndexType)floor((float)i / (float)number_of_subvoxels);
idx2[1] = (VoxelIndexType)floor((float)j / (float)number_of_subvoxels);
idx2[2] = (VoxelIndexType)floor((float)k / (float)number_of_subvoxels);
VoxelIndexType idx1 = idx2[0] + idx2[1] * this->imageSize[0] + idx2[2] * this->imageSize[0] * this->imageSize[1];
this->faceIndexMap[idx1].push_back( OutputImage->GetPixel(idx) );
}
}
}
}
std::cout << "\nLength of face Index Map " << this->faceIndexMap.size() << std::endl;
if(debug_prefix.compare("") != 0)
{
std::cout << "Now saving distance map...";
saveFidsViaKDtreeDistanceMap(OutputImage, debug_prefix);
// std::cout << "Now saving signed distance map...";
// saveFidsSignedDistanceMap(OutputImage, debug_prefix);
// std::cout << "Done\n";
}
}
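// Example (sketch): a minimal driver for the kd-tree based fids generation,
// assuming `narrowBand` is the subvoxel-resolution mask already loaded; the
// subvoxel factor and thread count below are illustrative values only.
// mesh.generateFaceIndexMapViaKDtree(narrowBand, 4, 8, "debug");
// mesh.WriteFaceIndexMap("mesh.fids.txt");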
/* Prateep */
void generateFaceIndexMapViaSuperVoxel(itk::Image<PixelType, 3>::ConstPointer narrowBand, itk::Image<PixelType, 3>::Pointer scaledDT,
float q, float ldelta,
int number_of_subvoxels = 1, int number_of_voxels = 1, float radiusFactor = 1.0, // search for all neighbors
int num_threads = 1, std::string debug_prefix = "", std::string debug_suffix = "", bool saveFaceIndMap = false)
{
this->faceIndexMap.clear();
this->number_of_subvoxels = number_of_subvoxels;
this->number_of_voxels = number_of_voxels;
// inline DeepCopy
itk::Image<int, 3>::Pointer OutputImage = itk::Image<int, 3>::New();
OutputImage->SetRegions( narrowBand->GetLargestPossibleRegion() );
OutputImage->Allocate();
OutputImage->SetOrigin( narrowBand->GetOrigin() );
OutputImage->SetSpacing( narrowBand->GetSpacing() );
OutputImage->SetDirection( narrowBand->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > narrowBandIt(narrowBand, narrowBand->GetLargestPossibleRegion());
//itk::ImageRegionIteratorWithIndex< itk::Image<int,3> > narrowBandItShared(narrowBand, narrowBand->GetLargestPossibleRegion());
itk::ImageRegionIterator< itk::Image<int,3> > OutputImageIt(OutputImage, narrowBand->GetLargestPossibleRegion() );
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
while(!narrowBandIt.IsAtEnd() ) {
OutputImageIt.Set( narrowBandIt.Get() );
++narrowBandIt;
++OutputImageIt;
}
narrowBandIt.GoToBegin();
//narrowBandItShared.GoToBegin();
OutputImageIt.GoToBegin();
#ifdef MP_USE_OPENMP
omp_set_num_threads(6);
#endif
int nf = this->faces.size();
itk::Image<int, 3>::PointType ori = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType sp = OutputImage->GetSpacing();
itk::Image<int, 3>::SizeType si = OutputImage->GetLargestPossibleRegion().GetSize();
float supVoxelOrigin[3];
supVoxelOrigin[0] = (float) (ori[0]);
supVoxelOrigin[1] = (float) (ori[1]);
supVoxelOrigin[2] = (float) (ori[2]);
float supVoxelSpacing[3];
supVoxelSpacing[0] = (float) (sp[0] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[1] = (float) (sp[1] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[2] = (float) (sp[2] * number_of_voxels * number_of_subvoxels);
int supVoxelSize[3];
supVoxelSize[0] = (int) ((si[0] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[1] = (int) ((si[1] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[2] = (int) ((si[2] / (float)number_of_subvoxels) / (float)number_of_voxels);
// super voxel face list
map<int, vector<int> > superVoxelFaceList;
// 1. Compute \sigma --> maximum physical distance for each supVoxel from isosurface
//int sigmaSV = -LARGENUM;
// // // shireen debug
// VoxelIndexType vox_ind = 2524975; //2574480; // vox_ind == 2524975 || vox_ind == 8340369)
// // VoxelIndexType voxX[3], voxXX[3];
// // this->linearIndexToXYZ(vox_ind, voxX, this->imageSize);
// // VoxelIndexType vox_ind22 = this->indexToLinearIndex(voxX, this->imageSize);
// // point vox_p = this->indexToPhysicalPoint(voxX, this->imageOrigin, this->imageSpacing);
// // this->physicalPointToXYZ(vox_p, voxXX, this->imageOrigin, this->imageSpacing);
// // VoxelIndexType vox_ind2 = this->physicalPointToLinearIndex(vox_p, this->imageOrigin, this->imageSpacing, this->imageSize);
// point vox_p = linearIndexToPhysicalPoint(vox_ind, this->imageOrigin, this->imageSpacing, this->imageSize);
// // VoxelIndexType vox_ind2;
// // vox_ind2 = this->physicalPointToLinearIndex(vox_p, this->imageOrigin, this->imageSpacing, this->imageSize);
// VoxelIndexType imageX[3];
// this->physicalPointToXYZ(vox_p, imageX, supVoxelOrigin, supVoxelSpacing);
// VoxelIndexType imageX_[3];
// this->physicalPointToXYZ(vox_p, imageX_, this->imageOrigin, this->imageSpacing);
// int iter2 = 0;
// for(int i = 0; i < supVoxelSize[0]; i++) {
// for(int j = 0; j < supVoxelSize[1]; j++) {
// for(int k = 0; k < supVoxelSize[2]; k++) {
// if(i == imageX[0] && j == imageX[1] && k ==imageX[2])
// {
// int hihi = 0;
// }
// iter2++;
// }
// }
// }
// // end shireen debug
int iter = 0;
for(int i = 0; i < supVoxelSize[0]; i++) {
for(int j = 0; j < supVoxelSize[1]; j++) {
for(int k = 0; k < supVoxelSize[2]; k++) {
// the super voxel index
VoxelIndexType p[3];
p[0] = i; p[1] = j; p[2] = k;
// converting the supervoxel index to a physical point in space
point supV = this->indexToPhysicalPoint(p, supVoxelOrigin, supVoxelSpacing);
// point to itk point
itk::Image<int, 3>::PointType supVp;
for(int ii = 0; ii < 3; ii++) supVp[ii] = supV[ii];
// Get the ball center
point supVCent = supV;
for(int ii = 0; ii < 3; ii++) {
supVCent[ii] += (float) q/2.0;
}
// // shireen debug
// if(imageX[0] == i && imageX[1] == j && imageX[2] == k)
// {
// int test = 0;
// }
// VoxelIndexType sind = this->physicalPointToLinearIndex(vox_p, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// VoxelIndexType sind2 = this->physicalPointToLinearIndex(supV , supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// if (sind == sind2)
// int test = 0;
// // end shireen debug
// get the subvoxel index of the supervoxel point (where it lies in the scaled distance transform)
itk::Image<PixelType,3>::IndexType supVInd;
//for(int ii = 0; ii < 3; ii++) supVInd[ii] = supV[ii];
scaledDT->TransformPhysicalPointToIndex(supVp, supVInd); // shireen, refer to the center not the corner
// get the distance (encoded in the distance transform) of this supervoxel from the mesh surface
float sigma = scaledDT->GetPixel(supVInd); // shireen debug float not int
// isotropic scaling
sigma *= scaledDT->GetSpacing()[0] * this->number_of_subvoxels;
// 2. Get ball radius
float ballRadiusSV = q + std::sqrt(ldelta*ldelta + sigma*sigma);
ballRadiusSV /= radiusFactor;
VoxelIndexType ind = this->physicalPointToLinearIndex(supV, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// 3. Get supVoxelFaceList
// #pragma omp parallel
{
// #pragma omp for
for(int f = 0; f < nf; f++) {
// if (f == 1536 || f == 4608)
// int tst = 0;
//std::cout << "Face # : " << f << std::endl;
point pp;
double d = this->pointTriangleDistance(supVCent, this->faces[f], pp);
if(d < ballRadiusSV + EPS)
{
//VoxelIndexType ind = this->physicalPointToLinearIndex(supV, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
superVoxelFaceList[ind].push_back(f);
}
}
}
iter++;
if(superVoxelFaceList[ind].size() != 0)
std::cout << "iter : " << iter << " ,length(Facelist) = " << superVoxelFaceList[ind].size() << "\n";
//else
// std::cout << "iter : " << iter << "ZERO!!!!!!!!! "<< "\n";
}
}
}
/*
std::string outfilename = "tmpSize.txt";
std::ofstream fout( outfilename.c_str(), std::ios::out);
for(map<int, set<int> >::iterator it = superVoxelFaceList.begin(); it != superVoxelFaceList.end(); it++)
{
fout << (int) (*it).first << ": ";
// for(set<int>::iterator it1 = it->second.begin(); it1 != it->second.end(); it1++) {
// fout << (*it1) << " ";
// }
fout << (int) (*it).second.size();
fout << std::endl;
}
fout.close();
*/
std::cout << "Starting functor ...\n";
itk::TimeProbe clock;
clock.Start();
{
typedef MapFunctor< itk::Image<PixelType,3>, itk::Image<int,3>, TriMesh > FType;
FType functor(OutputImage);
functor.superVoxelFaceList = superVoxelFaceList;
for(int i = 0; i < 3; i++) {
functor.supVoxelOrigin[i] = supVoxelOrigin[i];
functor.supVoxelSize[i] = supVoxelSize[i];
functor.supVoxelSpacing[i] = supVoxelSpacing[i];
}
// We only need the faces; the rest are just function calls on TriMesh, so other members need not be passed.
functor.mesh = *this;
functor.mesh.faces = this->faces;
bambam::map< itk::Image<PixelType, 3>, itk::Image<int,3>, FType>::run(narrowBand, functor, num_threads);
}
clock.Stop();
std::cout << "Time taken (functor)\n";
std::cout << "Mean : " << clock.GetMean() << std::endl;
std::cout << "Total : " << clock.GetTotal() << std::endl;
std::cout << "---------------------------\n";
if(saveFaceIndMap)
{
if(debug_prefix.compare ("") != 0)
{
itk::ImageFileWriter< itk::Image<int,3> >::Pointer writer = itk::ImageFileWriter< itk::Image<int, 3> >::New();
//std::stringstream ss; ss << radiusFactor;
//std::stringstream sp; sp << this->imageSpacing[0];
std::string f = debug_prefix + ".faceInd" + debug_suffix + ".nrrd";
writer->SetFileName( f.c_str() );
writer->SetInput( OutputImage );
writer->SetUseCompression(true);
writer->Update();
//saveFidsViaSuperVoxelDistanceMap(OutputImage, debug_prefix, radiusFactor); // using the subvoxel resolution not the original one
}
}
std::cout << "Computing .fids ...";
// Collect values in faceIndexMap
itk::Image<int, 3>::IndexType index = OutputImage->GetLargestPossibleRegion().GetIndex();
itk::Image<int, 3>::SizeType size = OutputImage->GetLargestPossibleRegion().GetSize();
itk::Image<int, 3>::PointType origin = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType spacing = OutputImage->GetSpacing();
// Store origin of image domain
this->imageOrigin[0] = origin[0];
this->imageOrigin[1] = origin[1];
this->imageOrigin[2] = origin[2];
// Store spacing of image domain
this->imageSpacing[0] = spacing[0] * number_of_subvoxels;
this->imageSpacing[1] = spacing[1] * number_of_subvoxels;
this->imageSpacing[2] = spacing[2] * number_of_subvoxels;
// Store size of image domain
this->imageSize[0] = size[0] / number_of_subvoxels;
this->imageSize[1] = size[1] / number_of_subvoxels;
this->imageSize[2] = size[2] / number_of_subvoxels;
this->imageIndex[0] = (int) (index[0] / number_of_subvoxels);
this->imageIndex[1] = (int) (index[1] / number_of_subvoxels);
this->imageIndex[2] = (int) (index[2] / number_of_subvoxels);
VoxelIndexType minIndex = this->imageIndex[0] + this->imageIndex[1] * this->imageSize[0] + this->imageIndex[2] * this->imageSize[0] * this->imageSize[1];
VoxelIndexType maxIndex = (this->imageSize[0]-1) + (this->imageSize[1]-1) * this->imageSize[0] + (this->imageSize[2]-1) * this->imageSize[0] * this->imageSize[1];
for(unsigned int i = index[0]; i < size[0]; i++) { // X
for(unsigned int j = index[1]; j < size[1]; j++) { // Y
for(unsigned int k = index[2]; k < size[2]; k++) { // Z
itk::Image<int, 3>::IndexType idx;
idx[0] = i; idx[1] = j; idx[2] = k;
if(OutputImage->GetPixel(idx) > -1) {
itk::Image<int, 3>::IndexType idx2;
idx2[0] = (int) (i / number_of_subvoxels);
idx2[1] = (int) (j / number_of_subvoxels);
idx2[2] = (int) (k / number_of_subvoxels);
for(int dx = -2; dx <= 2; dx++) {
for(int dy = -2; dy <= 2; dy++) {
for(int dz = -2; dz <= 2; dz++) {
itk::Image<int,3>::IndexType idx22;
idx22[0] = idx2[0] + dx;
idx22[1] = idx2[1] + dy;
idx22[2] = idx2[2] + dz;
if( !isInsideImageBuffer(idx22)) continue;
VoxelIndexType idx1 = idx22[0] + idx22[1] * this->imageSize[0] + idx22[2] * this->imageSize[0] * this->imageSize[1];
if(idx1 < 0) {
std::cout << "Index neg " << idx1 << std::endl;
continue;
}
// SHIREEN: when moving from set to vector for memory footprint, we need to make sure that we are not pushing duplicate candidate faces,
// otherwise the fids file will be huge (multiple gigabytes)
int curf = OutputImage->GetPixel(idx);
if (std::find(this->faceIndexMap[idx1].begin(), this->faceIndexMap[idx1].end(), curf) == this->faceIndexMap[idx1].end()) // current candidate has not been pushed back before
this->faceIndexMap[idx1].push_back( curf );
// VoxelIndexType idx1 = idx2[0] + idx2[1] * this->imageSize[0] + idx2[2] * this->imageSize[0] * this->imageSize[1];
// this->faceIndexMap[idx1].insert( OutputImage->GetPixel(idx) );
}
}
}
}
}
}
}
std::cout << "Done";
std::cout << "\nLength of face Index Map " << this->faceIndexMap.size() << std::endl;
if(debug_prefix.compare("") != 0)
{
// std::cout << "Now saving distance map...";
// saveFidsDistanceMap(OutputImage, debug_prefix, radiusFactor);
std::cout << "Now saving signed distance map...";
saveFidsSignedDistanceMap(OutputImage, debug_prefix, debug_suffix, radiusFactor);
//saveFidsSignedDistanceMap(OutputImage, scaledDT, debug_prefix, debug_suffix, radiusFactor);
std::cout << "Done\n";
}
}
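// Usage note (sketch): q is the supervoxel physical edge length and ldelta
// bounds the triangle size, so the per-supervoxel search ball radius is
// q + sqrt(ldelta^2 + sigma^2), with sigma the distance-transform value at the
// supervoxel center. The argument values below are illustrative only.
// float ldelta = mesh.getMaximumEdgeLength();
// mesh.generateFaceIndexMapViaSuperVoxel(narrowBand, scaledDT, q, ldelta,
// 4 /*subvoxels*/, 2 /*voxels*/, 1.0f, 8, "debug");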
/* Prateep */
void generateFaceIndexMapViaSuperVoxelSerial(itk::Image<PixelType, 3>::ConstPointer narrowBand, itk::Image<PixelType, 3>::Pointer scaledDT, float q, float ldelta,
int number_of_subvoxels = 1, int number_of_voxels = 1, float radiusFactor = 1.0, // search for all neighbors
std::string debug_prefix = "")
{
this->faceIndexMap.clear();
this->number_of_subvoxels = number_of_subvoxels;
this->number_of_voxels = number_of_voxels;
const double eps = 1e-6;
// inline DeepCopy
itk::Image<int, 3>::Pointer OutputImage = itk::Image<int, 3>::New();
OutputImage->SetRegions( narrowBand->GetLargestPossibleRegion() );
OutputImage->Allocate();
OutputImage->SetOrigin( narrowBand->GetOrigin() );
OutputImage->SetSpacing( narrowBand->GetSpacing() );
OutputImage->SetDirection( narrowBand->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > narrowBandIt(narrowBand, narrowBand->GetLargestPossibleRegion());
//itk::ImageRegionIteratorWithIndex< itk::Image<int,3> > narrowBandItShared(narrowBand, narrowBand->GetLargestPossibleRegion());
itk::ImageRegionIterator< itk::Image<int,3> > OutputImageIt(OutputImage, narrowBand->GetLargestPossibleRegion() );
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
while(!narrowBandIt.IsAtEnd() ) {
OutputImageIt.Set( (int) narrowBandIt.Get() );
++narrowBandIt;
++OutputImageIt;
}
narrowBandIt.GoToBegin();
//narrowBandItShared.GoToBegin();
OutputImageIt.GoToBegin();
#ifdef MP_USE_OPENMP
omp_set_num_threads(NUM_THREADS);
#endif
map<int, vector<int> > superVoxelFaceList;
int nf = this->faces.size();
itk::Image<int, 3>::PointType ori = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType sp = OutputImage->GetSpacing();
itk::Image<int, 3>::SizeType si = OutputImage->GetLargestPossibleRegion().GetSize();
float supVoxelOrigin[3];
supVoxelOrigin[0] = (float) (ori[0]);
supVoxelOrigin[1] = (float) (ori[1]);
supVoxelOrigin[2] = (float) (ori[2]);
float supVoxelSpacing[3];
supVoxelSpacing[0] = (float) (sp[0] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[1] = (float) (sp[1] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[2] = (float) (sp[2] * number_of_voxels * number_of_subvoxels);
int supVoxelSize[3];
supVoxelSize[0] = (int) ((si[0] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[1] = (int) ((si[1] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[2] = (int) ((si[2] / (float)number_of_subvoxels) / (float)number_of_voxels);
/***
* ver 1. Search each face and map it onto a supervoxel
for(int f = 0; f < nf; f++)
{
// points in physical coordinates
point v0 = this->vertices[ this->faces[f].v[0] ],
v1 = this->vertices[ this->faces[f].v[1] ],
v2 = this->vertices[ this->faces[f].v[2] ];
VoxelType ind0 = this->physicalPointToLinearIndex(v0, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
VoxelType ind1 = this->physicalPointToLinearIndex(v1, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
VoxelType ind2 = this->physicalPointToLinearIndex(v2, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
superVoxelFaceList[ind0].insert(f);
superVoxelFaceList[ind1].insert(f);
superVoxelFaceList[ind2].insert(f);
}
*/
/***
* ver 2.
*/
// 1. Compute \sigma --> maximum physical distance for each supVoxel from isosurface
//int sigmaSV = -LARGENUM;
int iter = 0;
for(int i = 0; i < supVoxelSize[0]; i++) {
for(int j = 0; j < supVoxelSize[1]; j++) {
for(int k = 0; k < supVoxelSize[2]; k++) {
VoxelIndexType p[3];
p[0] = i; p[1] = j; p[2] = k;
point supV = this->indexToPhysicalPoint(p, supVoxelOrigin, supVoxelSpacing);
point supVCent = supV;
itk::Image<int, 3>::PointType supVp;
for(int ii = 0; ii < 3; ii++) supVp[ii] = supV[ii];
itk::Image<int, 3>::IndexType supVInd;
scaledDT->TransformPhysicalPointToIndex(supVp, supVInd); // map the physical point into the scaled DT, as in the parallel version
float sigma = scaledDT->GetPixel(supVInd); // float, not int: avoid truncating the distance value
// 2. Get ball radius
float ballRadiusSV = q + std::sqrt(ldelta*ldelta + sigma*sigma);
ballRadiusSV /= radiusFactor;
// Get center
for(int ii = 0; ii < 3; ii++) {
supVCent[ii] += (float) q/2;
}
// 3. Get supVoxelFaceList
for(int f = 0; f < nf; f++) {
//std::cout << "Face # : " << f << std::endl;
point pp;
double d = this->pointTriangleDistance(supVCent, this->faces[f], pp);
if(d < ballRadiusSV + eps)
{
VoxelIndexType ind = this->physicalPointToLinearIndex(supV, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
superVoxelFaceList[ind].push_back(f);
}
}
iter++;
std::cout << "iter : " << iter << "\n";
}
}
}
/* debug - prateep */
std::string outfilename = "/home/sci/prateepm/Public/ForShireen/sphere/superVoxelList.txt";
std::ofstream fout( outfilename.c_str(), std::ios::out);
for(map<int, vector<int> >::iterator it = superVoxelFaceList.begin(); it != superVoxelFaceList.end(); it++)
{
fout << (int) (*it).first << ": ";
for(vector<int>::iterator it1 = it->second.begin(); it1 != it->second.end(); it1++) {
fout << (*it1) << " ";
}
fout << (int) (*it).second.size();
fout << std::endl;
}
fout.close();
/* debug - prateep */
int stop = 1;
int fid = -1;
int tn = 0; // thread id; stays 0 unless OpenMP is enabled
//#pragma omp parallel private(tn,fid,narrowBandIt)
{
#ifdef MP_USE_OPENMP
tn = omp_get_thread_num();
#endif
std::cout << "\nExecuting thread : " << tn << std::endl;
while( stop != 0 ) {
//#pragma omp critical
{
// Voxel is in narrow band
if(narrowBandIt.Get() == 1)
{
point tmPoint;
itk::Image<int, 3>::PointType itkPoint;
narrowBand->TransformIndexToPhysicalPoint(narrowBandIt.GetIndex(), itkPoint);
for(int i = 0; i < 3; i++) { tmPoint[i] = itkPoint[i]; }
VoxelIndexType vox_ind = this->physicalPointToLinearIndex(tmPoint, this->imageOrigin, this->imageSpacing, this->imageSize);
// if (vox_ind == 25744820) // shireen
// if(vox_ind == 2501770) // debug breakpoint anchor
// int tst = 0;
// Get nearest k vertices
vector<int> adjFaces; adjFaces.clear();
vector<int>::iterator adjFacesIt;
// find triangles enclosed inside each supervoxel
VoxelIndexType tmpInd = this->physicalPointToLinearIndex(tmPoint, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// std::cout << "Super Voxel List : " << superVoxelFaceList[tmpInd].size() << std::endl;
for(vector<int>::iterator it = superVoxelFaceList[tmpInd].begin(); it != superVoxelFaceList[tmpInd].end(); it++) {
adjFaces.push_back((*it));
}
std::cout << "Number of neighbors : " << adjFaces.size() << std::endl;
if(adjFaces.empty() ) //|| adjFaces.size() == nf)
{
// We can either abort here or ignore the voxel
OutputImageIt.Set(-1);
} else {
//std::cout << "Adjacent faces : " << this->adjacentfaces[imatch].size() << std::endl;
double minDist = LARGENUM;
for(adjFacesIt = adjFaces.begin(); adjFacesIt != adjFaces.end(); adjFacesIt++) {
point projPoint;
double dist = this->pointTriangleDistance(tmPoint, this->faces[*(adjFacesIt)], projPoint);
int region = this->pointTriangleRegion(tmPoint, this->faces[*(adjFacesIt)]);
// if (vox_ind == 2501770)
// std::cout << "Adjacent faceId : " << *(adjFacesIt) << ", Dist : " << dist << ", Region : " << region << std::endl;
if(dist + eps <= minDist) {
minDist = dist;
fid = *(adjFacesIt);
}
}
OutputImageIt.Set(fid);
adjFaces.clear();
}
} else {
OutputImageIt.Set(-1);
}
++narrowBandIt;
++OutputImageIt;
}
if(narrowBandIt.IsAtEnd() || OutputImageIt.IsAtEnd())
{
stop = 0;
//#pragma omp flush(stop)
}
}
}
if(debug_prefix.compare ("") != 0)
{
itk::ImageFileWriter< itk::Image<int,3> >::Pointer writer = itk::ImageFileWriter< itk::Image<int, 3> >::New();
std::stringstream ss; ss << radiusFactor;
std::stringstream sp; sp << this->imageSpacing[0];
std::string f = debug_prefix + ".faceInd_r" + ss.str() + "_sp" + sp.str() + ".nrrd";
writer->SetFileName( f.c_str() );
writer->SetInput( OutputImage );
writer->SetUseCompression(true);
writer->Update();
}
// Collect values in faceIndexMap
itk::Image<int, 3>::IndexType index = OutputImage->GetLargestPossibleRegion().GetIndex();
itk::Image<int, 3>::SizeType size = OutputImage->GetLargestPossibleRegion().GetSize();
itk::Image<int, 3>::PointType origin = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType spacing = OutputImage->GetSpacing();
// Store origin of image domain
this->imageOrigin[0] = origin[0];
this->imageOrigin[1] = origin[1];
this->imageOrigin[2] = origin[2];
// Store spacing of image domain
this->imageSpacing[0] = spacing[0] * number_of_subvoxels;
this->imageSpacing[1] = spacing[1] * number_of_subvoxels;
this->imageSpacing[2] = spacing[2] * number_of_subvoxels;
// Store size of image domain
this->imageSize[0] = size[0] / number_of_subvoxels;
this->imageSize[1] = size[1] / number_of_subvoxels;
this->imageSize[2] = size[2] / number_of_subvoxels;
VoxelIndexType minIndex = this->imageIndex[0] + this->imageIndex[1] * this->imageSize[0] + this->imageIndex[2] * this->imageSize[0] * this->imageSize[1];
VoxelIndexType maxIndex = (this->imageSize[0]-1) + (this->imageSize[1]-1) * this->imageSize[0] + (this->imageSize[2]-1) * this->imageSize[0] * this->imageSize[1];
for(unsigned int i = index[0]; i < size[0]; i++) { // X
for(unsigned int j = index[1]; j < size[1]; j++) { // Y
for(unsigned int k = index[2]; k < size[2]; k++) { // Z
itk::Image<int, 3>::IndexType idx;
idx[0] = i; idx[1] = j; idx[2] = k;
if(OutputImage->GetPixel(idx) > -1) {
itk::Image<int, 3>::IndexType idx2;
idx2[0] = (int) (i / number_of_subvoxels);
idx2[1] = (int) (j / number_of_subvoxels);
idx2[2] = (int) (k / number_of_subvoxels);
VoxelIndexType idx1 = idx2[0] + idx2[1] * this->imageSize[0] + idx2[2] * this->imageSize[0] * this->imageSize[1];
this->faceIndexMap[idx1].push_back( OutputImage->GetPixel(idx) );
if(idx1+1 <= maxIndex)
this->faceIndexMap[idx1+1].push_back( OutputImage->GetPixel(idx) );
if(idx1-1 >= minIndex)
this->faceIndexMap[idx1-1].push_back( OutputImage->GetPixel(idx) );
}
}
}
}
std::cout << "\nLength of face Index Map " << this->faceIndexMap.size() << std::endl;
}
int GetTriangleInfoForPoint(point x, Face& triangleX, float& alphaX, float& betaX, float& gammaX)
{
int faceID;
if(this->faceIndexMap.size() > 0) // there is a generated face index map so use it
{
// Physical point to Image Index
VoxelIndexType linearIndX = this->physicalPointToLinearIndex(x);
// collect face indices for this voxel
std::map<VoxelIndexType, vector<int> >::iterator it = this->faceIndexMap.find(linearIndX);
if(it != this->faceIndexMap.end()) // see if linearIndX already exists in the face index map
{
// std::cout << "WOW, fids will be used ... \n" ;
vector<int> faceList = this->faceIndexMap[linearIndX];
double minDist = LARGENUM;
int winnerIndex = -1; // guard against an empty face list
for(vector<int>::iterator it = faceList.begin(); it != faceList.end(); ++it)
{
triangleX = this->faces[(*it)];
// project the point onto the plane of the current triangle
point projPoint;
double dist = this->pointTriangleDistance(x, triangleX, projPoint);
if (dist < minDist )
{
minDist = dist;
winnerIndex = (*it);
}
}
triangleX = this->faces[winnerIndex];
faceID = winnerIndex;
point projPoint;
double dist = this->pointTriangleDistance(x, triangleX, projPoint);
vec barycentric = this->ComputeBarycentricCoordinates(projPoint, triangleX);
alphaX = barycentric[0];
betaX = barycentric[1];
gammaX = barycentric[2];
}
else //kdtree based
{
#if SHOW_WARNING
std::cout << "warning: using kdtree for triangle info because voxel index " << linearIndX <<": "<< x <<" is not found in the face index map !!! ...\n" ;
#endif
// get vertex closest to first point - x
int vertX = this->FindNearestVertex(x);
// scan all adjacent faces to see which face (f) includes point x
triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
{
// check if face contains x and store barycentric coordinates for x in face f
triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
faceID = this->adjacentfaces[vertX][fNumber] ;
vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
alphaX = barycentric[0];
betaX = barycentric[1];
gammaX = barycentric[2];
if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
{
fNumber = this->adjacentfaces[vertX].size();
}
}
}
}
else
{
#if SHOW_WARNING
std::cout << "warning: using kdtree for triangle info because there is no face index map !!! ...\n" ;
#endif
// get vertex closest to first point - x
int vertX = this->FindNearestVertex(x);
unsigned int fNumber;
// scan all adjacent faces to see which face (f) includes point x
triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
faceID = this->adjacentfaces[vertX][0];
for (fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
{
// check if face contains x and store barycentric coordinates for x in face f
triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
faceID = this->adjacentfaces[vertX][fNumber];
vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
alphaX = barycentric[0];
betaX = barycentric[1];
gammaX = barycentric[2];
if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
{
fNumber = this->adjacentfaces[vertX].size();
}
}
// debug anchor: negative barycentric coordinates mean no adjacent face encloses x
// if(alphaX < 0.0f || betaX < 0.0f || gammaX < 0.0f ) {
// int t = 0;
// }
}
return faceID;
}
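// Example (sketch): resolving a physical point to a triangle and barycentric
// coordinates; with a face index map loaded this is a voxel lookup, otherwise
// it falls back to the nearest-vertex scan above. Names are hypothetical.
// Face tri; float a, b, c;
// int fid = mesh.GetTriangleInfoForPoint(x, tri, a, b, c);
// // a*v0 + b*v1 + c*v2 reconstructs the projection of x onto face fid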
int GetVertexInfoForPoint(point x)
{
int vertX;
Face triangleX;
float alphaX, betaX, gammaX;
if(this->faceIndexMap.size() > 0) // there is a generated face index map so use it
{
//std::cout << "WOW, fids will be used ... \n" ;
// Physical point to Image Index
VoxelIndexType linearIndX = this->physicalPointToLinearIndex(x);
// collect face indices for this voxel
std::map<VoxelIndexType, vector<int> >::iterator it = this->faceIndexMap.find(linearIndX);
if(it != this->faceIndexMap.end())
{
vector<int> faceList = this->faceIndexMap[linearIndX];
double minDist = LARGENUM;
int winnerIndex = -1; // guard against an empty face list
for(vector<int>::iterator it = faceList.begin(); it != faceList.end(); ++it)
{
triangleX = this->faces[(*it)];
// project the point onto the plane of the current triangle
point projPoint;
double dist = this->pointTriangleDistance(x, triangleX, projPoint);
if (dist < minDist )
{
minDist = dist;
winnerIndex = (*it);
}
}
triangleX = this->faces[winnerIndex];
point projPoint;
double dist = this->pointTriangleDistance(x, triangleX, projPoint);
vec barycentric = this->ComputeBarycentricCoordinates(projPoint, triangleX);
alphaX = barycentric[0];
betaX = barycentric[1];
gammaX = barycentric[2];
// get vertex closest to first point - x
vertX = this->FindNearestVertex(projPoint);
}
else //kdtree based
{
#if SHOW_WARNING
std::cout << "warning: using kdtree for triangle info because voxel index " << linearIndX << " is not found in the face index map !!! ...\n" ;
#endif
// get vertex closest to first point - x
vertX = this->FindNearestVertex(x);
// scan all adjacent faces to see which face (f) includes point x
triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
{
// check if face contains x and store barycentric coordinates for x in face f
triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
alphaX = barycentric[0];
betaX = barycentric[1];
gammaX = barycentric[2];
if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
{
fNumber = this->adjacentfaces[vertX].size();
}
}
}
}
else
{
#if SHOW_WARNING
std::cout << "warning: using kdtree for triangle info because there is no face index map !!! ...\n" ;
#endif
// get vertex closest to first point - x
vertX = this->FindNearestVertex(x);
// scan all adjacent faces to see which face (f) includes point x
triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
{
// check if face contains x and store barycentric coordinates for x in face f
triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
alphaX = barycentric[0];
betaX = barycentric[1];
gammaX = barycentric[2];
if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
{
fNumber = this->adjacentfaces[vertX].size();
}
}
}
return vertX;
}
// end SHIREEN
float GetEuclideanDistance(int v1,int v2)
{
float d = 0.000001f;
point p1, p2;
p1 = this->vertices[v1];
p2 = this->vertices[v2];
d = dist(p1,p2);
return d;
}
// SHIREEN
float GetEuclideanDistance(point p1, point p2)
{
float d = 0.000001f;
d = dist(p1,p2);
return d;
}
// end SHIREEN
float GetGeodesicDistance(int v1,int v2)
{
float gDist = 0.000001f;
if (v1 == v2) return gDist;
int vert = v1;
int key = v2;
if (v2 > v1)
{
vert = v2;
key = v1;
}
std::map<unsigned int,float>::iterator geoIter = this->geodesicMap[vert].find(key);
if (geoIter != this->geodesicMap[vert].end())
{
gDist = geoIter->second;
}
else
{
gDist = LARGENUM;
}
return gDist;
}
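// Note: geodesicMap stores each vertex pair once, keyed by the larger id, so
// the lookup above canonicalizes (v1, v2) first; a missing entry means the
// pair was beyond the stored range and LARGENUM is returned.
// Example (sketch): mesh.GetGeodesicDistance(12, 7) reads the same map entry
// as mesh.GetGeodesicDistance(7, 12).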
/* Prateep */
double GetGeodesicDistance(point x, point y)
{
float alphaX, betaX, gammaX;
Face triangleX;
GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
float alphaY, betaY, gammaY;
Face triangleY;
GetTriangleInfoForPoint(y, triangleY, alphaY, betaY, gammaY);
// compute geodesic distance by interpolation
// level 1: interpolate the distance from each vertex of the source triangle to the destination point (i.e. D(triangleX, y))
float dx0y = ( alphaY * this->GetGeodesicDistance( triangleX.v[0], triangleY.v[0] ) ) +
( betaY * this->GetGeodesicDistance( triangleX.v[0], triangleY.v[1] ) ) +
( gammaY * this->GetGeodesicDistance( triangleX.v[0], triangleY.v[2] ) );
float dx1y = ( alphaY * this->GetGeodesicDistance( triangleX.v[1], triangleY.v[0] ) ) +
( betaY * this->GetGeodesicDistance( triangleX.v[1], triangleY.v[1] ) ) +
( gammaY * this->GetGeodesicDistance( triangleX.v[1], triangleY.v[2] ) );
float dx2y = ( alphaY * this->GetGeodesicDistance( triangleX.v[2], triangleY.v[0] ) ) +
( betaY * this->GetGeodesicDistance( triangleX.v[2], triangleY.v[1] ) ) +
( gammaY * this->GetGeodesicDistance( triangleX.v[2], triangleY.v[2] ) );
// level 2, interpolate distance between x & y
float dxy = (alphaX * dx0y) + (betaX * dx1y) + (gammaX * dx2y);
return dxy;
}
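// The interpolation above is bilinear in the barycentric weights: with source
// weights (alphaX, betaX, gammaX) on triangleX and destination weights
// (alphaY, betaY, gammaY) on triangleY,
// d(x,y) ~= sum_i w_i(x) * sum_j w_j(y) * d(v_i, u_j)
// where v_i, u_j are the respective triangle vertices and d(.,.) the
// precomputed vertex-to-vertex geodesic distances.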
/* Praful */
float TestReport(char* str, int numTri, int numPts)
{
// std::cout<<"Generating Test Report..."<<std::endl;
std::ofstream myfile;
myfile.open(str);
int numFaces = this->faces.size();
char str1[] = "Newton";
char str2[] = "Bary";
char str3[] = "LM";
int counter = 0;
for(int i=0; i<numTri; i++)
{
// std::cout<<"Counter: "<<++counter<<std::endl;
int f1 = rand() % numFaces;
int f2 = rand() % numFaces;
// int tmpf = f1 + 1 + (rand() % 5);
// std::cout<<"tmpf = "<<tmpf<<std::endl;
// double param = (double) (tmpf)/ (double) (numFaces);
// double intpart, fractpart;
// fractpart = std::modf(param, &intpart);
// int f2 = (int) std::floor(fractpart*(double)numFaces);
Face Sa = this->faces[f1];
Face Sb = this->faces[f2];
point pta, ptb;
// std::cout<<"Original: "<<std::endl;
// std::cout<<"Face a: "<<Sa.v[0]<<" "<<Sa.v[1]<<" "<<Sa.v[2]<<std::endl;
// std::cout<<"Face b: "<<Sb.v[0]<<" "<<Sb.v[1]<<" "<<Sb.v[2]<<std::endl;
for(int j=0; j<numPts; j++)
{
float alp = (float)(rand() % 50) / 100.0f;
float bet = (float)(rand() % 50) / 100.0f;
float gam = 1.0f - alp - bet;
// std::cout<<"numFaces = "<<numFaces<<std::endl;
std::cout<<"f1 = "<<f1<<" f2 = "<<f2<<std::endl;
std::cout<<"Original Barycoordinates: "<<std::endl;
std::cout<<alp<<" "<<bet<<" "<<gam<<std::endl;
vnl_vector <float> baryCoord(3);
baryCoord[0] = alp;
baryCoord[1] = bet;
baryCoord[2] = gam;
for(int ii=0; ii<3; ii++)
{
pta[ii] = alp * (float)(this->vertices[Sa.v[0]][ii]) + bet * (float)(this->vertices[Sa.v[1]][ii]) + gam*(float)(this->vertices[Sa.v[2]][ii]);
ptb[ii] = alp * (float)(this->vertices[Sb.v[0]][ii]) + bet * (float)(this->vertices[Sb.v[1]][ii]) + gam*(float)(this->vertices[Sb.v[2]][ii]);
}
float ctheta = (pta DOT ptb) / (len(pta) * len(ptb));
if(ctheta > 1.0f) ctheta = 1.0f;
else if(ctheta < -1.0f) ctheta = -1.0f;
float gth = std::acos(ctheta) * this->bsphere.r;
// std::cout<<"check1"<<std::endl;
float valApprox = this->GetBronsteinGeodesicDistance(pta, ptb, str3);
// float valApprox = this->GetBronsteinGeodesicDistance(pta, ptb, str3, Sa, Sb, baryCoord, baryCoord); %debugging
// std::cout<<"check2"<<std::endl;
float valBary = this->GetBronsteinGeodesicDistance(pta, ptb, str2);//, Sa, Sb, baryCoord, baryCoord);
// float valNewton = this->GetBronsteinGeodesicDistance(pta, ptb, str1, Sa, Sb, baryCoord, baryCoord);
myfile<<f1<<"\t"<<f2<<"\t"<<alp<<"\t"<<bet<<"\t"<<gam<<"\t"<<gth<<"\t"<<valApprox<<"\t"<<valBary<<"\n";
std::cout<<"gth: "<<gth<<"\t"<<"3PtApprox: "<<valApprox<<"\tBary: "<<valBary<<"\n";
}
}
myfile.close();
return 1.0f;
}
/* Praful */
float TestApproxGeodesic(int f1, int f2)
{
Face Sa = this->faces[f1];
Face Sb = this->faces[f2];
point pta, ptb;
for(int i=0; i<3; i++)
{
pta[i] = 0.33f*this->vertices[Sa.v[0]][i] + 0.33f*this->vertices[Sa.v[1]][i] + 0.34f*this->vertices[Sa.v[2]][i];
ptb[i] = 0.33f*this->vertices[Sb.v[0]][i] + 0.33f*this->vertices[Sb.v[1]][i] + 0.34f*this->vertices[Sb.v[2]][i];
}
vnl_vector <float> baryCoord(3,0.33);
float ctheta = (pta DOT ptb) / (len(pta) * len(ptb));
if(ctheta > 1.0f) ctheta = 1.0f;
else if(ctheta < -1.0f) ctheta = -1.0f;
char method[] = "Newton";
float gth = std::acos(ctheta) * this->bsphere.r;
float val = this->GetBronsteinGeodesicDistance(pta, ptb, method);//, Sa, Sb, baryCoord, baryCoord);
std::cout<<"**************"<<std::endl;
std::cout<<"Approx value = "<<val<<std::endl;
std::cout<<"Gth value = "<<gth<<std::endl;
std::cout<<"**************"<<std::endl;
return val;
}
/* Praful */
float GetBronsteinGeodesicDistance(point a, point b, char* method)//, Face Sa, Face Sb, vnl_vector <float> baryCoord_a, vnl_vector <float> baryCoord_b)
{
Face Sa, Sb;
vnl_vector <float> baryCoord_a(3), baryCoord_b(3);
float alp_a, alp_b, bet_a, bet_b, gam_a, gam_b;
GetTriangleInfoForPoint(a, Sa, alp_a, bet_a, gam_a);
GetTriangleInfoForPoint(b, Sb, alp_b, bet_b, gam_b);
// pack the barycentric coordinates; without this the vectors below are passed uninitialized
baryCoord_a[0] = alp_a; baryCoord_a[1] = bet_a; baryCoord_a[2] = gam_a;
baryCoord_b[0] = alp_b; baryCoord_b[1] = bet_b; baryCoord_b[2] = gam_b;
float dGeo_a_2_b = GetBronsteinGeodesicDistance(Sa, Sb, baryCoord_a, baryCoord_b, method);
return dGeo_a_2_b;
}
/* Praful */
float GetBronsteinGeodesicDistance( Face Sa, Face Sb, vnl_vector <float> baryCoord_a, vnl_vector <float> baryCoord_b, char* method)
{
point a; a.clear();
point b; b.clear();
for (int d1 = 0; d1 < 3; d1++)
{
a[d1] = 0.0;
b[d1] = 0.0;
for (int d2 = 0; d2 < 3; d2++)
{
point vt = vertices[Sa.v[d2]];
a[d1] += baryCoord_a[d2]*vt[d1];
point vt2 = vertices[Sb.v[d2]];
b[d1] += baryCoord_b[d2]*vt2[d1];
}
}
float alp_a, alp_b, bet_a, bet_b, gam_a, gam_b;
alp_a = baryCoord_a[0];
bet_a = baryCoord_a[1];
gam_a = baryCoord_a[2];
alp_b = baryCoord_b[0];
bet_b = baryCoord_b[1];
gam_b = baryCoord_b[2];
if (alp_a<0.000001f)
{
alp_a=0.000001f;
}
if (bet_a<0.000001f)
{
bet_a=0.000001f;
}
if (gam_a<0.000001f)
{
gam_a=0.000001f;
}
if (alp_b<0.000001f)
{
alp_b=0.000001f;
}
if (bet_b<0.000001f)
{
bet_b=0.000001f;
}
if (gam_b<0.000001f)
{
gam_b=0.000001f;
}
// normalize using sums captured before any component is overwritten
float sum_a = alp_a + bet_a + gam_a;
alp_a /= sum_a;
bet_a /= sum_a;
gam_a /= sum_a;
float sum_b = alp_b + bet_b + gam_b;
alp_b /= sum_b;
bet_b /= sum_b;
gam_b /= sum_b;
baryCoord_a[0]=alp_a;
baryCoord_a[1]=bet_a;
baryCoord_a[2]=gam_a;
baryCoord_b[0]=alp_b;
baryCoord_b[1]=bet_b;
baryCoord_b[2]=gam_b;
vnl_vector<float> xA(2);
vnl_vector<float> xB(2);
vnl_matrix<float> Xa(2,3);
vnl_matrix<float> Xb(2,3);
if(baryCoord_a.max_value() >1.0f || baryCoord_a.min_value()<0.0f || baryCoord_b.max_value() >1.0f || baryCoord_b.min_value()<0.0f)
{
std::cerr<<"incorrect barycentric coordinates...!!"<<std::endl;
vcl_cerr<<"baryCoord_a: "<<baryCoord_a<<std::endl;
vcl_cerr<<"baryCoord_b: "<<baryCoord_b<<std::endl;
return EXIT_FAILURE;
}
ComputeCanonicalForm(a, xA, Xa);
ComputeCanonicalForm(b, xB, Xb);
vnl_matrix<float> dA_2_B(3,3);
bool tooFar = false;
for(int i=0; i<3; i++)
{
for(int j=0; j<3; j++)
{
dA_2_B(i,j) = this->GetGeodesicDistance(Sa.v[i], Sb.v[j]);
// SHIREEN: if triangles are too far, don't bother to complete
if ( dA_2_B(i,j) == LARGENUM)
{
tooFar = true;
break;
}
}
if (tooFar)
break;
}
if (tooFar)
return LARGENUM;
vnl_vector<float> geo_approx_2_B(3);
for(int vertB_id=0; vertB_id<3; vertB_id++)
geo_approx_2_B[vertB_id] = ComputeThreePointApproximatedGeodesic(xA, baryCoord_a, Xa, dA_2_B.get_column(vertB_id), method);
float dGeo_a_2_b=0.0f;
dGeo_a_2_b = ComputeThreePointApproximatedGeodesic(xB, baryCoord_b, Xb, geo_approx_2_B, method);
return dGeo_a_2_b;
}
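// The two ComputeThreePointApproximatedGeodesic calls above form a two-level
// scheme: level 1 approximates the geodesic from the interior point a to each
// vertex of Sb (geo_approx_2_B); level 2 interpolates those three values at
// b's barycentric location inside Sb to give the point-to-point distance.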
/* Praful */
float ComputeGradient(vnl_vector<float> x0, vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, vnl_vector<float> & G)
{
G = vnl_vector<float>(2, 0.0f);
for(int k=0; k<2; k++)
{
for(int ii=0; ii<3; ii++)
{
vnl_vector<float> xi = X.get_column(ii);
vnl_vector<float> tmp = x0 - xi;
float residual = dot_product(tmp,tmp) - ds[ii]*ds[ii];
G[k] += 4*baryCoord[ii]*residual*tmp[k];
}
}
return 1.0f;
}
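// The loop above accumulates the gradient of the weighted residual energy
// E(x0) = sum_i b_i * (||x0 - x_i||^2 - d_i^2)^2,
// dE/dx0[k] = sum_i 4 * b_i * r_i * (x0[k] - x_i[k]),
// with r_i = ||x0 - x_i||^2 - d_i^2 and b_i the barycentric weights.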
/* Praful */
float ComputeHessian(vnl_vector<float> x0, vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, vnl_matrix<float> & H )
{
H = vnl_matrix<float>(2,2,0.0f);
for(int k=0; k<2; k++)
{
for(int kp=0; kp<2; kp++)
{
for(int ii=0; ii<3; ii++)
{
vnl_vector<float> xi = X.get_column(ii);
vnl_vector<float> tmp = x0 - xi;
float residual = dot_product(tmp,tmp) - ds[ii]*ds[ii];
if(k==kp)
{
H(k,k) += 4*baryCoord[ii]*(residual + 2*tmp[k]*tmp[k]);
}
else
{
H(k,kp) += 8*baryCoord[ii]*tmp[k]*tmp[kp];
}
}
}
}
return 1.0f;
}
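// Second derivatives of the same energy as in ComputeGradient:
// H(k,k) = sum_i 4 * b_i * (r_i + 2*(x0[k] - x_i[k])^2)
// H(k,kp) = sum_i 8 * b_i * (x0[k] - x_i[k]) * (x0[kp] - x_i[kp]), k != kp,
// matching the two branches in the loop above.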
/* Praful */
float ComputeThreePointApproximatedGeodesic(vnl_vector<float> x, vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, char* method)
{
float geo_approx = -1.0f;
vnl_vector<float> x0;
// std::cout<<"check4"<<std::endl;
float n = GetVirtualSource(baryCoord, X, ds, x0);
// std::cout<<"check5"<<std::endl;
char check2[] = "Bary";
if(n==-1.0f || strcmp(method, check2)==0)
{
// std::cout<<"Using Bary..."<<std::endl;
geo_approx = dot_product(baryCoord, ds);
}
else
{
char check1[] = "Newton";
if(strcmp(method, check1)==0) //Newton method
{
// std::cout<<"Using Newton iterations..."<<std::endl;
// vcl_cout<<"Initial x0= "<<x0<<std::endl;
float eta = 1.0f;
for(int iter=0; iter<10; iter++)
{
vnl_matrix<float> H;
vnl_vector<float> G;
ComputeGradient(x0, baryCoord, X, ds, G);
ComputeHessian(x0, baryCoord, X, ds, H);
vnl_matrix<float> Hinv = vnl_matrix_inverse<float>(H);
x0 -= eta*Hinv*G;
}
// vcl_cout<<"Final x0= "<<x0<<std::endl;
}
else //LM method
{
// std::cout<<"LM..coming soon.."<<std::endl;
// std::cout<<"Using LM..."<<std::endl;
float v = 2.0f;
float eps1 = 0.000001f;
float eps2 = 0.000001f;
float tau = 0.001f;
int m = 3;
int n = 2;
float k = 0.0f;
float kmax = 10.0f;
// computing Jacobian
// vcl_cout<<"x0: "<<std::endl<<x0<<std::endl;
// vcl_cout<<"baryCoord: "<<std::endl<<baryCoord<<std::endl;
vnl_matrix<float> J(m, n, 0.0f);
for(int i = 0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
// vcl_cout<<"xi: "<<std::endl<<xi<<std::endl;
for(int j = 0; j<n; j++)
{
J(i,j)=2.0f * (float) (std::sqrt(baryCoord[i])) * (x0[j]-xi[j]);
}
}
// vcl_cout<<"J: "<<std::endl<<J.extract(m,n,0,0)<<std::endl;
// computing function values given the current guess
vnl_vector<float> f(m, 0.0f);
for(int i=0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
float di = ds[i];
vnl_vector<float> x0_m_xi;
x0_m_xi = x0 - xi;
float r_i = dot_product(x0_m_xi, x0_m_xi) - di*di;
f[i] = (float) (std::sqrt(baryCoord[i])) * r_i;
}
float F;
F = dot_product(f,f);
F = 0.5f*F;
vnl_matrix<float> A(n,n,0.0f);
A = J.transpose()*J;
vnl_vector<float> g(n,0.0f);
g = J.transpose()*f;
vnl_vector<float> diagA = A.get_diagonal();
float max_diagA = diagA.max_value();
float mu = tau * max_diagA;
float norm_g = g.two_norm();
vnl_matrix<float> muId(n,n,0.0f);
vnl_matrix<float> A_mu(n,n,0.0f);
vnl_matrix<float> A_mu_inv;
vnl_vector<float> hlm(n,0.0f);
vnl_vector<float> xnew(n,0.0f);
vnl_vector<float> fnew(m, 0.0f);
float Fnew=0.0f, delta_L=0.0f, rho=0.0f;
// std::cout<<"****************"<<std::endl;
bool found = norm_g <= eps1;
while(!found && k<kmax)
{
k = k + 1.0f;
muId.set_identity();
muId = mu*muId;
A_mu = A + muId;
// std::cout<<"check4"<<std::endl;
// vcl_cout<<"A: "<<std::endl<<A.extract(n,n,0,0)<<std::endl;
// std::cout<<"mu: "<<mu<<std::endl;
// vcl_cout<<"A_mu: "<<std::endl<<A_mu.extract(n,n,0,0)<<std::endl;
A_mu_inv = vnl_matrix_inverse<float>(A_mu);
// std::cout<<"check51"<<std::endl;
// vcl_cout<<"A_mu_inv: "<<std::endl<<A_mu_inv.extract(n,n,0,0)<<std::endl;
A_mu_inv = -1.0f*A_mu_inv;
// vcl_cout<<"A_mu_inv: "<<std::endl<<A_mu_inv.extract(n,n,0,0)<<std::endl;
hlm = A_mu_inv*g;
float norm_hlm = hlm.two_norm();
float norm_x0 = x0.two_norm();
if(norm_hlm <= (eps1 * (norm_x0 + eps2)))
{
found = true;
}
else
{
xnew = x0 + hlm;
for(int i = 0; i<m ; i++)
{
vnl_vector<float> xi = X.get_column(i);
float di = ds[i];
vnl_vector<float> x_m_xi;
x_m_xi = xnew - xi;
float r_i = dot_product(x_m_xi, x_m_xi) - di*di;
fnew[i] = (float) (std::sqrt(baryCoord[i])) * r_i;
}
Fnew = dot_product(fnew,fnew);
Fnew = 0.5f*Fnew;
delta_L = 0.5f*dot_product(hlm, (mu*hlm-g));
rho = (F-Fnew)/delta_L;
if(rho>0.0f)
{
x0 = xnew;
// computing Jacobian
for(int i = 0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
for(int j = 0; j<n; j++)
{
J(i,j)=2.0f * (float) (std::sqrt(baryCoord[i])) * (x0[j]-xi[j]);
}
}
// computing function values given the current guess
for(int i=0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
float di = ds[i];
vnl_vector<float> x0_m_xi;
x0_m_xi = x0 - xi;
float r_i = dot_product(x0_m_xi, x0_m_xi) - di*di;
f[i] = (float) (std::sqrt(baryCoord[i])) * r_i;
}
F = dot_product(f,f);
F = 0.5f*F;
A = J.transpose()*J;
g = J.transpose()*f;
norm_g = g.two_norm();
found = norm_g <= eps1;
// std::cout<<"=================="<<std::endl;
// std::cout<<"mu= "<<mu<<std::endl;
// std::cout<<"=================="<<std::endl;
float cmp1 = 1.0f - (2.0f*rho - 1.0f)*(2.0f*rho - 1.0f)*(2.0f*rho - 1.0f);
if(0.3f > cmp1)
{
mu = mu*0.3f;
}
else
{
mu = mu*cmp1;
}
// std::cout<<"=================="<<std::endl;
// std::cout<<"cmp1= "<<cmp1<<" mu= "<<mu<<std::endl;
// std::cout<<"=================="<<std::endl;
v = 2.0f;
}
else
{
mu = mu*v;
v = 2.0f*v;
}
}
}
// vcl_cout<<x0<<std::endl;
}
geo_approx = (x0-x).two_norm();
} //end else xinit not empty
// std::cout<<"Returning geo_approx..."<<geo_approx<<std::endl;
return geo_approx;
}
/* Praful */
float GetVirtualSource(vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, vnl_vector< float > & x0)
{
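// Intersect the three circles (centres X(:,i), radii ds[i]) pairwise; every
// finite intersection point is a candidate virtual source, and the candidate
// with the smallest barycentric-weighted residual energy is returned in x0.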
// vcl_cout<<"X:"<<std::endl<<X.extract(2,3,0,0);
// vcl_cout<<"ds: "<<ds<<std::endl;
vgl_homg_point_2d<float> centre1(X(0,0), X(1,0),1);
vgl_homg_point_2d<float> centre2(X(0,1), X(1,1),1);
vgl_homg_point_2d<float> centre3(X(0,2), X(1,2),1);
vgl_conic<float> circle1(centre1, ds[0], ds[0], 0.0f);
vgl_conic<float> circle2(centre2, ds[1], ds[1], 0.0f);
vgl_conic<float> circle3(centre3, ds[2], ds[2], 0.0f);
// vcl_cout<<"Circle1: "<<circle1<<std::endl;
// vcl_cout<<"Circle2: "<<circle2<<std::endl;
// vcl_cout<<"Circle3: "<<circle3<<std::endl;
vcl_list<vgl_homg_point_2d<float> > pts1;
pts1 = vgl_homg_operators_2d<float>::intersection(circle1, circle2);
int n1 = (int) (pts1.size());
vcl_list<vgl_homg_point_2d<float> > pts2;
pts2 = vgl_homg_operators_2d<float>::intersection(circle2, circle3);
int n2 = (int) (pts2.size());
vcl_list<vgl_homg_point_2d<float> > pts3;
pts3 = vgl_homg_operators_2d<float>::intersection(circle1, circle3);
int n3 = (int) (pts3.size());
int n = n1+n2+n3;
// std::cout<<"n= "<<n<<std::endl;
if(n==0)
{
x0 = vnl_vector<float>(2,-1.0f);
return -1.0f;
}
else
{
vnl_matrix< float > xinit(2,n,0);
int i=0;
typedef vcl_list< vgl_homg_point_2d < float > > container;
vgl_homg_point_2d<float> temp;
for(container::iterator p = pts1.begin(); p!=pts1.end(); p++)
{
// std::cout<<"n1 = "<<n1<<std::endl;
temp = *p;
if (!std::isfinite(temp.x()) || !std::isfinite(temp.y()) || !std::isfinite(temp.w())) continue;
// std::cout<<"x: "<<temp.x()<<" y: "<<temp.y()<<" w: "<<temp.w()<<std::endl;
xinit(0,i)=temp.x()/temp.w();
xinit(1,i)=temp.y()/temp.w();
// vcl_cout<<"i= "<<i<<" xinit(i)="<<xinit.get_column(i)<<std::endl;
i++;
}
for(container::iterator p = pts2.begin(); p!=pts2.end(); p++)
{
// std::cout<<"n2 = "<<n2<<std::endl;
temp = *p;
if (!std::isfinite(temp.x()) || !std::isfinite(temp.y()) || !std::isfinite(temp.w())) continue;
// std::cout<<"x: "<<temp.x()<<" y: "<<temp.y()<<" w: "<<temp.w()<<std::endl;
xinit(0,i)=temp.x()/temp.w();
xinit(1,i)=temp.y()/temp.w();
// vcl_cout<<"i= "<<i<<" xinit(i)="<<xinit.get_column(i)<<std::endl;
i++;
}
for(container::iterator p = pts3.begin(); p!=pts3.end(); p++)
{
// std::cout<<"n3 = "<<n3<<std::endl;
temp = *p;
if (!std::isfinite(temp.x()) || !std::isfinite(temp.y()) || !std::isfinite(temp.w())) continue;
// std::cout<<"x: "<<temp.x()<<" y: "<<temp.y()<<" w: "<<temp.w()<<std::endl;
xinit(0,i)=temp.x()/temp.w();
xinit(1,i)=temp.y()/temp.w();
// vcl_cout<<"i= "<<i<<" xinit(i)="<<xinit.get_column(i)<<std::endl;
i++;
}
if (i==0)
{
x0 = vnl_vector<float>(2,-1.0f);
return -1.0f;
}
// vcl_cout<<"xinit:"<<std::endl<<xinit.extract(2,n,0,0)<<std::endl;
// vcl_cout<<"xinit:"<<std::endl<<xinit.extract(2,i,0,0)<<std::endl;
double minE = 10000000000.0;
int flag = 0;
int winner = -1;
for(int i1=0; i1<i; i1++)
{
double energy = 0.0;
vnl_vector<float> pt = xinit.get_column(i1);
// vcl_cout<<"pt= "<<pt<<std::endl;
for(int j=0; j<3; j++)
{
vnl_vector<float> tmp1 = pt - X.get_column(j);
float residual = std::abs(tmp1.squared_magnitude() - ds[j]*ds[j]); //write the dot product in place of tmp1.*tmp1
energy += (double)(residual*baryCoord[j]);
// float residual = tmp1.squared_magnitude() - ds[j]*ds[j]; //write the dot product in place of tmp1.*tmp1
// energy += (double)(residual*residual*baryCoord[j]);
}
// std::cout<<"Energy: "<<energy<<std::endl;
if(flag==0)
{
minE = energy;
winner = i1;
flag = 1;
}
else
{
if(energy < minE)
{
minE = energy;
winner = i1;
}
}
}
// std::cout<<winner<<std::endl;
x0 = xinit.get_column(winner);
// vcl_cout<<"x0: "<<x0<<std::endl;
return 1.0f;
}
}
/* Praful */
float ComputeCanonicalForm(point s, vnl_vector<float> & x, vnl_matrix<float> & X)//, Face S)
{
Face S;
float alpS, betS, gamS;
GetTriangleInfoForPoint(s, S, alpS, betS, gamS);
vnl_matrix<float> S_(3,3);
vnl_vector<float> muS(3,0);
for(int i = 0; i < 3; i++) {
point vertex = this->vertices[ S.v[i] ];
for(int j = 0; j < 3; j++) S_(i,j) = (float)(vertex[j]);
}
// std::cout<<"*****************"<<std::endl;
// vcl_cout<<"Face: "<<std::endl<<S_.extract(3,3,0,0)<<std::endl;
// std::cout<<"*****************"<<std::endl;
S_ = S_.transpose();
// std::cout<<"*****************"<<std::endl;
// vcl_cout<<"Transposed: "<<std::endl<<S_.extract(3,3,0,0)<<std::endl;
// std::cout<<"*****************"<<std::endl;
for(int r = 0; r < 3; r++) {
for(int c = 0; c < 3; c++) muS[r] += S_(r,c);
muS[r] /= 3.0f;
}
// std::cout<<"*****************"<<std::endl;
// vcl_cout<<"muS: "<<std::endl<<muS<<std::endl;
// std::cout<<"*****************"<<std::endl;
// Scent = S - muS
vnl_matrix<float> Scent(3,3);
for(int r = 0; r < 3; r++) {
for(int c = 0; c < 3; c++) Scent(r,c) = S_(r,c) - muS[r];
}
// vcl_cout<<"Scent: "<<Scent.extract(3,3,0,0)<<std::endl;
vnl_svd<float> svd(Scent);
// bool vld_svd = vnl_svd<float>::valid();
// std::cout<<"Valid SVD? "<<vld_svd<<std::endl;
// std::cout<<"checkpoint SVD"<<std::endl;
// vnl_diag_matrix<point::value_type> W_ = svd.W();
vnl_matrix<float> U_ = svd.U();
// vcl_cout<<"U_: "<<U_.extract(3,2,0,0)<<std::endl;
// std::cout<<"check32"<<std::endl;
// vnl_matrix<point::value_type> V_ = svd.V();
/* top 2 eigen vectors */
vnl_matrix<float> U(3,2);
for(int r = 0; r < 3; r++) {
for(int c = 0; c < 2; c++) U(r,c) = U_(r,c);
}
// std::cout<<"............................"<<std::endl;
// vcl_cout<<"U: "<<U.extract(2,3,0,0)<<std::endl;
// std::cout<<"............................"<<std::endl;
/*vnl_matrix<point::value_type>*/ X = U.transpose() * Scent;
vnl_vector<float> sCent(3);
for(int c = 0; c < 3; c++) sCent[c] = s[c] - muS[c];
/*vnl_vector<point::value_type>*/ x = U.transpose() * sCent;
// std::cout<<"-----------------------------"<<std::endl;
// vcl_cout<<x<<std::endl;
// std::cout<<"-----------------------------"<<std::endl;
return 1.0f;
// printing for debugging
// std::cout<<std::endl<<std::endl<<"Canonical form computed..."<<std::endl;
// vcl_cerr<<x;
// std::cout<<std::endl;
// vcl_cerr<<X.extract(2,3,0,0);
}
// SHIREEN
void GetPointTriangleVertices(point x, point & v1, point & v2, point & v3)
{
Face triangleX;
float alphaX, betaX, gammaX;
GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
v1[0] = this->vertices[triangleX.v[0]][0];
v1[1] = this->vertices[triangleX.v[0]][1];
v1[2] = this->vertices[triangleX.v[0]][2];
v2[0] = this->vertices[triangleX.v[1]][0];
v2[1] = this->vertices[triangleX.v[1]][1];
v2[2] = this->vertices[triangleX.v[1]][2];
v3[0] = this->vertices[triangleX.v[2]][0];
v3[1] = this->vertices[triangleX.v[2]][1];
v3[2] = this->vertices[triangleX.v[2]][2];
}
void GetPointTriangleVertices(point x, point & v1, point & v2, point & v3, ivec3 & vids)
{
Face triangleX;
float alphaX, betaX, gammaX;
GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
v1[0] = this->vertices[triangleX.v[0]][0];
v1[1] = this->vertices[triangleX.v[0]][1];
v1[2] = this->vertices[triangleX.v[0]][2];
v2[0] = this->vertices[triangleX.v[1]][0];
v2[1] = this->vertices[triangleX.v[1]][1];
v2[2] = this->vertices[triangleX.v[1]][2];
v3[0] = this->vertices[triangleX.v[2]][0];
v3[1] = this->vertices[triangleX.v[2]][1];
v3[2] = this->vertices[triangleX.v[2]][2];
vids[0] = triangleX.v[0];
vids[1] = triangleX.v[1];
vids[2] = triangleX.v[2];
}
void GetPointBarycentricCoordinates(point x, float& alphaX, float& betaX, float& gammaX)
{
Face triangleX;
GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
}
// END-SHIREEN
/* Prateep */
void saveFidsDistanceMap(itk::Image<int,3>::Pointer fidsVolume, std::string prefix, double r = 1.0)
{
itk::Image<PixelType,3>::Pointer distMap = itk::Image<PixelType,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<int,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<PixelType,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
if(fid == -1) {
distMapIt.Set(-1.0);
} else {
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, junk;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
double d = this->pointTriangleDistance(p, this->faces[fid], junk);
distMapIt.Set((float) d);
}
++fidsVolumeIt;
++distMapIt;
}
itk::Image<PixelType,3>::Pointer origDistMap = itk::Image<PixelType,3>::New();
resampleDistanceMap(distMap, origDistMap);
itk::ImageFileWriter< itk::Image<PixelType, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
std::stringstream ss; ss << r;
std::string f = prefix + ".DistMap_r" + ss.str() + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( origDistMap );
w->SetUseCompression(true);
w->Update();
}
/* Prateep */
// shireen: provide the approximate distance transform to fix leaking artifacts that might be caused by sharp corners or irregular triangulation
//void saveFidsSignedDistanceMap(itk::Image<int,3>::Pointer fidsVolume, itk::Image<PixelType, 3>::Pointer scaledDT, std::string prefix, std::string suffix = "", double r = 1.0)
void saveFidsSignedDistanceMap(itk::Image<int,3>::Pointer fidsVolume, std::string prefix, std::string suffix = "", double r = 1.0)
{
if(normals.empty()) this->need_normals();
// std::string file = prefix + ".normals.ply";
// this->write(file.c_str() );
itk::Image<PixelType,3>::Pointer distMap = itk::Image<PixelType,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::Image<PixelType,3>::PointType origin = fidsVolume->GetOrigin();
itk::Image<PixelType,3>::SpacingType spacing = fidsVolume->GetSpacing();
float m_origin[3];
m_origin[0] = origin[0];
m_origin[1] = origin[1];
m_origin[2] = origin[2];
float m_spacing[3];
m_spacing[0] = spacing[0];
m_spacing[1] = spacing[1];
m_spacing[2] = spacing[2];
itk::ImageRegionConstIteratorWithIndex< itk::Image<int,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<PixelType,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
// shireen
//itk::ImageRegionIterator< itk::Image<PixelType,3> > approxDistMapIt(scaledDT, scaledDT->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
//approxDistMapIt.GoToBegin();
point tstp;
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
double sign;
double d;
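// Sign convention: facenormal is flipped to agree with the vertex normal
// (outward), so voxels on the outward side of the nearest face (dot2 < 0)
// receive a negative distance and interior voxels a positive one.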
if(fid == -1) {
// distMapIt.Set(-1.0);
/* Prateep :
* - fix DT to get distance values for every voxel.
*/
itk::Image<int,3>::IndexType ind_subv = fidsVolumeIt.GetIndex();
VoxelIndexType _ind_subv[3];
_ind_subv[0] = ind_subv[0];
_ind_subv[1] = ind_subv[1];
_ind_subv[2] = ind_subv[2];
point p = this->indexToPhysicalPoint(_ind_subv, m_origin, m_spacing), j;
tstp = p;
float alphaP, betaP, gammaP;
Face triangleP;
int fidP = this->GetTriangleInfoForPoint(p, triangleP, alphaP, betaP, gammaP );
d = this->pointTriangleDistance( p, this->faces[fidP], j);
// (a) get face normal
vec v0 = this->vertices[this->faces[fidP].v[0]];
vec nv0 = this->normals[this->faces[fidP].v[0]];
vec v1 = this->vertices[this->faces[fidP].v[1]];
vec v2 = this->vertices[this->faces[fidP].v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
// (b) get sign
vec c = (v0 + v1 + v2); c /= 3.f;
float dot2 = (c - p) DOT facenormal;
if(dot2 < 0.0f) sign = -1.0f;
else sign = 1.0f;
} else {
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, j;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
d = this->pointTriangleDistance(p, this->faces[fid], j);
tstp = p;
// (a) get face normal
vec v0 = this->vertices[this->faces[fid].v[0]];
vec nv0 = this->normals[this->faces[fid].v[0]];
vec v1 = this->vertices[this->faces[fid].v[1]];
vec v2 = this->vertices[this->faces[fid].v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
// (b) get sign
vec c = (v0 + v1 + v2); c /= 3.f;
float dot2 = (c - p) DOT facenormal;
if(dot2 < 0.0f) sign = -1.0f;
else sign = 1.0f;
}
// if(std::fabs(sign*d + 1.0f) < EPS) {
// std::cout << "bug : " << tstp[0] << ' ' << tstp[1] << ' ' << tstp[2] << std::endl;
// }
distMapIt.Set((float)(sign*d));
// // shireen
// PixelType approx_d = approxDistMapIt.Get();
// double approx_sign;
// if(approx_d < 0.0f) approx_sign = -1.0f;
// else approx_sign = 1.0f;
// if (approx_sign == sign)
// distMapIt.Set((float)(sign*d));
// else
// {
// std::cout << "WARNING: sign doesn't match for fid = " << fid << ", using approximate distance ..." << std::endl;
// distMapIt.Set((float)(approx_d));
// }
++fidsVolumeIt;
++distMapIt;
//++approxDistMapIt;
}
itk::Image<PixelType,3>::Pointer origDistMap = itk::Image<PixelType,3>::New();
resampleDistanceMap(distMap, origDistMap);
itk::ImageFileWriter< itk::Image<PixelType, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
//std::stringstream ss; ss << r;
//std::stringstream sp; sp << this->imageSpacing[0];
//std::string f = prefix + ".SignedDistMap_r" + ss.str() + "_sp" + sp.str() + ".nrrd";
std::string f = prefix + ".SignedDistMap" + suffix + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( origDistMap );
w->SetUseCompression(true);
w->Update();
}
/* Prateep */
void resampleDistanceMap(itk::Image<PixelType,3>::Pointer img, itk::Image<PixelType,3>::Pointer output)
{
if(this->imageSize[0] == 0 || this->imageSize[1] == 0 || this->imageSize[2] == 0 ||
this->imageSpacing[0] == 0.f || this->imageSpacing[1] == 0.f || this->imageSpacing[2] == 0.f)
{
eprintf("Error!!! invalid image dimensions for resampling\n");
return;
}
typedef itk::ResampleImageFilter< itk::Image<PixelType,3>, itk::Image<PixelType,3> > ResamplerType;
// typedef itk::LinearInterpolateImageFunction< itk::Image<PixelType,3>, double> InterpolatorType;
typedef itk::BSplineInterpolateImageFunction<itk::Image<PixelType,3>, double, double> InterpolatorType;
typedef itk::IdentityTransform< double, 3> TransformType;
TransformType::Pointer identityTransform = TransformType::New();
identityTransform->SetIdentity();
InterpolatorType::Pointer interpolator = InterpolatorType::New();
interpolator->SetSplineOrder(3);
ResamplerType::Pointer resampler = ResamplerType::New();
resampler->SetTransform( identityTransform );
resampler->SetInterpolator( interpolator );
resampler->SetOutputOrigin( this->imageOrigin );
double spacing[3];
spacing[0] = this->imageSpacing[0];
spacing[1] = this->imageSpacing[1];
spacing[2] = this->imageSpacing[2];
resampler->SetOutputSpacing( spacing);
resampler->SetInput( img );
resampler->SetOutputDirection( img->GetDirection() );
itk::Size<3> size;
size[0] = this->imageSize[0];
size[1] = this->imageSize[1];
size[2] = this->imageSize[2];
resampler->SetSize( size );
resampler->Update();
// inline DeepCopy
output->SetRegions( resampler->GetOutput()->GetLargestPossibleRegion() );
output->Allocate();
output->SetOrigin( resampler->GetOutput()->GetOrigin());
output->SetSpacing( resampler->GetOutput()->GetSpacing());
output->SetDirection( resampler->GetOutput()->GetDirection());
itk::ImageRegionConstIterator < itk::Image<PixelType,3> > inputIt( resampler->GetOutput(), resampler->GetOutput()->GetLargestPossibleRegion() );
itk::ImageRegionIterator < itk::Image<PixelType,3> > outputIt(output, output->GetLargestPossibleRegion() );
while( !inputIt.IsAtEnd() )
{
outputIt.Set( inputIt.Get() );
++inputIt;
++outputIt;
}
}
/* Prateep */
void saveFidsViaSuperVoxelDistanceMap(itk::Image<int,3>::Pointer fidsVolume, std::string prefix, double r = 1.0)
{
itk::Image<double,3>::Pointer distMap = itk::Image<double,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<int,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<double,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
if(fid == -1) {
distMapIt.Set(-1.0);
} else {
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, junk;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
double d = this->pointTriangleDistance(p, this->faces[fid], junk);
distMapIt.Set(d);
}
++fidsVolumeIt;
++distMapIt;
}
itk::ImageFileWriter< itk::Image<double, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<double, 3> >::New();
std::stringstream ss; ss << r;
std::string f = prefix + ".fidsSV_distMap_r" + ss.str() + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( distMap );
w->SetUseCompression(true);
w->Update();
}
/* Prateep */
void saveFidsViaKDtreeDistanceMap(itk::Image<PixelType,3>::Pointer fidsVolume, std::string prefix)
{
itk::Image<PixelType,3>::Pointer distMap = itk::Image<PixelType,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<PixelType,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
if(fid == -1) {
distMapIt.Set(-1.0);
} else {
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, junk;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
double d = this->pointTriangleDistance(p, this->faces[fid], junk);
// (a) get face normal
vec v0 = this->vertices[this->faces[fid].v[0]];
vec nv0 = this->normals[this->faces[fid].v[0]];
vec v1 = this->vertices[this->faces[fid].v[1]];
vec v2 = this->vertices[this->faces[fid].v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
// (b) get sign
vec c = (v0 + v1 + v2); c /= 3.f;
float dot2 = (c - p) DOT facenormal;
double sign;
if(dot2 < 0.0f) sign = -1.0f;
else sign = 1.0f;
distMapIt.Set(sign*d);
}
++fidsVolumeIt;
++distMapIt;
}
itk::Image<PixelType,3>::Pointer origDistMap = itk::Image<PixelType,3>::New();
resampleDistanceMap(distMap, origDistMap);
itk::ImageFileWriter< itk::Image<PixelType, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
std::string f = prefix + ".fidsKD_distMap" + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( origDistMap );
w->SetUseCompression(true);
w->Update();
}
int FindNearestVertex(point pt)
{
if ( !kd )
{
kd = new KDtree(this->vertices);
}
if (maxEdgeLength == 0.0)
{
need_maxedgelength();
}
const float *match = kd->closest_to_pt(pt,100000.0*sqr(maxEdgeLength)); // SHIREEN - enlarge the neighborhood size for the kdtree to find a match
int imatch = 0;
if (!match)
{
std::cout << "failed to find vertex within " << maxEdgeLength << " for point " << pt << ". using vertex 0" << std::endl;
return imatch;
}
imatch = (match - (const float *) &(vertices[0][0])) / 3;
return imatch;
}
// SHIREEN
float cotangent(point a, point b, point c)
{
// compute the cotangent of the non-degenerate triangle abc at vertex b
vec3 ba = (vec3) (a - b);
vec3 bc = (vec3) (c - b);
float cot = (bc DOT ba) / (EPS + len(bc CROSS ba));
return cot;
}
vec3 ComputeGeneralizedBarycentricCoordinates(point p, Face f) // suffers from numerical instability on very small faces, even worse than the face-area approach
{
// this assumes that p is strictly within the given face
// this implementation follows the paper below to handle the numerical instability that arises for small-area triangles:
// Meyer, Mark, Alan Barr, Haeyoung Lee, and Mathieu Desbrun. "Generalized barycentric coordinates on irregular polygons."
// Journal of graphics tools 7, no. 1 (2002): 13-22.
vec3 bCoords; bCoords.clear();
point v0,v1,v2;
v0 = this->vertices[ f.v[0] ];
v1 = this->vertices[ f.v[1] ];
v2 = this->vertices[ f.v[2] ];
point n = (v1 - v0) CROSS (v2 - v0);
normalize(n);
float area = ( (v1-v0) CROSS (v2-v0) ) DOT n ;
if (area < 0.0001) // a small face
{
float scale = 1000.0;
point center = (v0 + v1 + v2);
center[0] /= 3.0; center[1] /= 3.0; center[2] /= 3.0;
v0 = v0 - center; v0[0] *= scale; v0[1] *= scale; v0[2] *= scale;
v1 = v1 - center; v1[0] *= scale; v1[1] *= scale; v1[2] *= scale;
v2 = v2 - center; v2[0] *= scale; v2[1] *= scale; v2[2] *= scale;
}
point vcur, vprev, vnext;
vec3 curedge;
// the edge connecting p and v0
vcur = v0; vprev = v2; vnext = v1;
curedge = (vec3) (p - vcur);
bCoords[0] = ( cotangent(p, vcur, vprev) + cotangent(p, vcur, vnext) ) / len2(curedge);
// the edge connecting p and v1
vcur = v1; vprev = v0; vnext = v2;
curedge = (vec3) (p - vcur);
bCoords[1] = ( cotangent(p, vcur, vprev) + cotangent(p, vcur, vnext) ) / len2(curedge);
// the edge connecting p and v2
vcur = v2; vprev = v1; vnext = v0;
curedge = (vec3) (p - vcur);
bCoords[2] = ( cotangent(p, vcur, vprev) + cotangent(p, vcur, vnext) ) / len2(curedge);
float sum = bCoords.sum();
bCoords[0] /= sum;
bCoords[1] /= sum;
bCoords[2] /= sum;
return bCoords;
}
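// Usage sketch (illustrative only): for p strictly inside f, the returned
// weights are non-negative, sum to 1, and reconstruct p:
//   vec3 w = ComputeGeneralizedBarycentricCoordinates(p, f);
//   point q = w[0]*vertices[f.v[0]] + w[1]*vertices[f.v[1]] + w[2]*vertices[f.v[2]];
//   // q should match p up to numerical error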
// end SHIREEN
vec3 ComputeBarycentricCoordinates2(point p, Face f)
{
vec3 bCoords; bCoords.clear();
point a,b,c;
a = this->vertices[ f.v[0] ];
b = this->vertices[ f.v[1] ];
c = this->vertices[ f.v[2] ];
point n = (b - a) CROSS (c - a);
normalize(n);
float denominator = ( (b - a) CROSS (c - a) ) DOT n;
bCoords[0] = ( ( (c - b) CROSS (p - b) ) DOT n ) / denominator;
bCoords[1] = ( ( (a - c) CROSS (p - c) ) DOT n ) / denominator;
bCoords[2] = ( ( (b - a) CROSS (p - a) ) DOT n ) / denominator;
return bCoords;
}
vec3 ComputeBarycentricCoordinates(point p, Face f)//, bool useGeneralized = false)
{
//if (useGeneralized)
// return ComputeGeneralizedBarycentricCoordinates(p,f);
vec3 bCoords; bCoords.clear();
point a,b,c;
a = this->vertices[ f.v[0] ];
b = this->vertices[ f.v[1] ];
c = this->vertices[ f.v[2] ];
point n = (b - a) CROSS (c - a);
normalize(n);
float area = ( (b-a) CROSS (c-a) ) DOT n ;
float inv_area = 1.0f / (area + EPS);
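// Each coordinate below is the signed area of the sub-triangle spanned by p
// and one edge, divided by the full triangle area; the renormalization at the
// end guards against p sitting slightly off the supporting plane.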
// shireen
// if (area < 0.0001) // a small face
// {
// float scale = 1000.0;
// point center = (a + b + c);
// center[0] /= 3.0; center[1] /= 3.0; center[2] /= 3.0;
// a = a - center; a[0] *= scale; a[1] *= scale; a[2] *= scale;
// b = b - center; b[0] *= scale; b[1] *= scale; b[2] *= scale;
// c = c - center; c[0] *= scale; c[1] *= scale; c[2] *= scale;
// point n = (b-a) CROSS (c-a);
// normalize(n);
// area = ( (b-a) CROSS (c-a) ) DOT n;
// //vec3 ab = (vec3) (b-a);
// //vec3 ac = (vec3) (c-a);
// //area = len(ab CROSS ac)/2.0; // half the area of the parallelogram constructed by ab and ac
// inv_area = 1 / (area + EPS);
// }
// /* Prateep */ // this made the situation worse in case of projecting particles very near or almost on the surface
// vec3 pb = (vec3)(p - b); vec3 pa = (vec3)(p-a); vec3 pc = (vec3)(p-c);
// float normpb = len2(pb); float normpa = len2(pa); float normpc = len2(pc);
// float tot = (normpa + normpb + normpc);
// normpa = normpa / tot;
// normpb = normpb / tot;
// normpc = normpc / tot;
// if(IsCloseToAnEdge(p,f)) {
// // interpolate
// bCoords[0] = normpa;
// bCoords[1] = normpb;
// bCoords[2] = normpc;
// return bCoords;
// }
// shireen
bCoords[0] = ( ( (c - b) CROSS (p - b) ) DOT n ) * inv_area; // caching 1/area per face in a map<Face, double> did not work
bCoords[1] = ( ( (a - c) CROSS (p - c) ) DOT n ) * inv_area;
bCoords[2] = ( ( (b - a) CROSS (p - a) ) DOT n ) * inv_area;
float sum = bCoords.sum();
bCoords[0] /= sum;
bCoords[1] /= sum;
bCoords[2] /= sum;
return bCoords;
}
// vec3 ComputeBarycentricCoordinates(point p, Face f)
// {
// vec3 bCoords; bCoords.clear();
// point a,b,c;
// a = this->vertices[ f.v[0] ];
// b = this->vertices[ f.v[1] ];
// c = this->vertices[ f.v[2] ];
// point n = (b - a) CROSS (c - a);
// normalize(n);
// float denominator = ( (b - a) CROSS (c - a) ) DOT n;
// // if (denominator < 0.0001) // small face
// // {
// // float scale = 1000.0;
// // point center = (a + b + c);
// // center[0] /= 3.0; center[1] /= 3.0; center[2] /= 3.0;
// // a = a - center; a[0] *= scale; a[1] *= scale; a[2] *= scale;
// // b = b - center; b[0] *= scale; b[1] *= scale; b[2] *= scale;
// // c = c - center; c[0] *= scale; c[1] *= scale; c[2] *= scale;
// // }
// // denominator = ( (b - a) CROSS (c - a) ) DOT n;
// bCoords[0] = ( ( (c - b) CROSS (p - b) ) DOT n ) / (denominator + 1e-7);
// bCoords[1] = ( ( (a - c) CROSS (p - c) ) DOT n ) / (denominator + 1e-7);
// bCoords[2] = ( ( (b - a) CROSS (p - a) ) DOT n ) / (denominator + 1e-7);
// // // Transcribed from Christer Ericson's Real-Time Collision Detection
// // vec3 v0 = b - a, v1 = c - a, v2 = p - a;
// // float d00 = v0 DOT v0;
// // float d01 = v0 DOT v1;
// // float d11 = v1 DOT v1;
// // float d20 = v2 DOT v0;
// // float d21 = v2 DOT v1;
// // float Denom = (d00 * d11 - d01 * d01);
// // float invDenom = 1.0 / (d00 * d11 - d01 * d01);
// // bCoords[1] = (d11 * d20 - d01 * d21) * invDenom;
// // bCoords[2] = (d00 * d21 - d01 * d20) * invDenom;
// // bCoords[0] = 1.0f - bCoords[1] - bCoords[2];
// return bCoords;
// }
/* Prateep */
bool IsCloseToAnEdge(point p, Face f)
{
point a,b,c;
a = this->vertices[ f.v[0] ];
b = this->vertices[ f.v[1] ];
c = this->vertices[ f.v[2] ];
vec3 pb = (vec3)(p - b);
vec3 pa = (vec3)(p - a);
vec3 pc = (vec3)(p - c);
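// Each test below uses dist(p, line)^2 = |edge x (p - endpoint)|^2 / |edge|^2,
// so |edge x (p - endpoint)|^2 < EPS * |edge|^2 means p lies within sqrt(EPS)
// of the edge's supporting line.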
float norma; float normb;
// check if p is \epsilon close to edge BC
vec3 bc = (vec3)(c-b);
vec3 tb = (vec3)( bc CROSS pb );
norma = len2(tb);
normb = len2(bc);
if(norma < EPS * normb) {
return true;
}
// check if p is \epsilon close to edge AC
vec3 ac = (vec3)(a-c);
vec3 tc = (vec3)( ac CROSS pc );
norma = len2(tc);
normb = len2(ac);
if(norma < EPS * normb) {
return true;
}
// check if p is \epsilon close to edge AB
vec3 ab = (vec3)(b-a);
vec3 ta = (vec3)( ab CROSS pa );
norma = len2(ta);
normb = len2(ab);
if(norma < EPS * normb) {
return true;
}
return false;
}
// // map< face, ...> didnot work
// /* Prateep */
// void CacheFaceIds() // shireen
// {
// faceids.clear();
// for(unsigned int i = 0; i < this->faces.size(); i++)
// {
// Face f = this->faces[i];
// faceids[f] = i;
// }
// }
// void CacheAreaInvPerTriangle()
// {
// areaInvPerTri.clear();
// for(unsigned int i = 0; i < this->faces.size(); i++)
// {
// Face f = this->faces[i];
// point a,b,c;
// a = this->vertices[ f.v[0] ];
// b = this->vertices[ f.v[1] ];
// c = this->vertices[ f.v[2] ];
// point n = (b-a) CROSS (c-a);
// normalize(n);
// float denominator = ( (b-a) CROSS (c-a) ) DOT n;
// float inv_area = 1.0f / (denominator + EPS);
// areaInvPerTri.push_back((double)inv_area); // shireen
// // // SHIREEN
// // vec3 ab = (vec3) (b-a);
// // vec3 ac = (vec3) (c-a);
// // float area = len(ab CROSS ac)/2.0; // half the area of the parallelogram constructed by ab and ac
// // areaInvPerTri[f] = 1.0f / (area + EPS);
// }
// }
// // SHIREEN
// void CacheAreaPerTriangle()
// {
// areaPerTri.clear();
// for(unsigned int i = 0; i < this->faces.size(); i++)
// {
// Face f = this->faces[i];
// point a,b,c;
// a = this->vertices[ f.v[0] ];
// b = this->vertices[ f.v[1] ];
// c = this->vertices[ f.v[2] ];
// // // shireen
// // vec3 ab = (vec3) (b-a);
// // vec3 ac = (vec3) (c-a);
// // float area = len(ab CROSS ac)/2.0; // half the area of the parallelogram constructed by ab and ac
// // areaPerTri[f] = area;
// point n = (b-a) CROSS (c-a);
// normalize(n);
// float denominator = EPS + (( (b-a) CROSS (c-a) ) DOT n);
// areaPerTri.push_back((double)denominator); // shireen
// //areaPerTri[f] = (double)denominator;
// }
// }
// // end SHIREEN
void ReadFeatureFromFile(const char *infilename)
{
std::ifstream infile(infilename, std::ios::binary);
if (!infile.is_open())
{
std::cerr << "File Not Found: " << infilename << std::endl;
throw(1);
}
else
{
// read # vertices
unsigned int numVert;
infile.read(reinterpret_cast<char *>(&numVert), sizeof(unsigned int));
if ( numVert != this->vertices.size() )
{
std::cerr << "size of feature vector does not match # vertices in mesh" << std::endl;
throw(1);
}
else
{
// std::cout << "reading feature from file " << infilename << std::endl;
vector< float > tmpFeatureVec;
// loop over vertices
for (unsigned int i = 0; i < numVert; i++)
{
// read feature value
float value;
infile.read( reinterpret_cast<char *>(&value), sizeof(float) );
tmpFeatureVec.push_back(value);
}
this->features.push_back( tmpFeatureVec );
}
infile.close();
}
}
void ReadFeatureFromList(const char *infilename)
{
std::ifstream infile(infilename);
if (!infile.is_open())
{
std::cerr << "File Not Found" << std::endl;
throw(1); //exit(1);
}
else
{
// std::cout << "reading feature from file " << infilename << std::endl;
vector< float > tmpFeatureVec;
float value;
// loop over vertices
while (infile >> value) // stops cleanly at end of file, no stale trailing value
{
tmpFeatureVec.push_back(value);
}
if ( tmpFeatureVec.size() == this->vertices.size() )
{
this->features.push_back( tmpFeatureVec );
}
else
{
std::cerr << "size of feature vector does not match # vertices in the mesh ! Aborting..." << std::endl;
throw(1);//exit(1);
}
infile.close();
}
}
/* Praful */
void ReadFeatureGradientFromFile(const char *infilename)
{
std::ifstream infile(infilename, std::ios::binary); // binary: the reads below use infile.read()
if (!infile.is_open())
{
std::cerr << "File Not Found" << std::endl;
throw(1);//exit(1);
}
else
{
// read # vertices
unsigned int numVert;
infile.read(reinterpret_cast<char *>(&numVert), sizeof(unsigned int));
if ( numVert != this->vertices.size() )
{
std::cerr << "size of feature vector does not match # vertices in mesh" << std::endl;
throw(1); //exit(1);
}
else
{
// std::cout << "reading feature gradient from file " << infilename << std::endl;
vector<point> tempFeatureGradVec;
// loop over vertices
for (unsigned int i = 0; i < numVert; i++)
{
// read feature gradient
point val;
float value;
for (int j = 0; j < 3; j ++)
{
infile.read( reinterpret_cast<char *>(&value), sizeof(float) );
val[j] = (float) value;
}
tempFeatureGradVec.push_back(val);
}
this->featureGradients.push_back( tempFeatureGradVec );
}
infile.close();
}
}
void WriteFeatureToFile(int featureIndex, const char *outfilename)
{
//std::ofstream outfile(outfilename, std::ios::binary);
//// write numVertices to facilitate reading later
//int numVert = this->vertices.size();
//outfile.write( reinterpret_cast<char *>(&numVert), sizeof(int) );
//// loop over each vertex
//for (int i = 0; i < numVert; i++)
//{
// // write distance to curve
// unsigned short value = this->(features[featureIndex])[i];
// outfile.write( reinterpret_cast<char *>(&value), sizeof(unsigned short) );
//}
//outfile.close();
}
/* Praful */
void GetFeatureValues(point x, std::vector<float> & vals)
{
float alphaX, betaX, gammaX;
Face triangleX;
GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
if (alphaX < 0.000001f)
alphaX = 0.000001f;
if (betaX < 0.000001f)
betaX = 0.000001f;
if (gammaX < 0.000001f)
gammaX = 0.000001f;
float sumX = alphaX + betaX + gammaX; // normalize with a fixed sum; dividing sequentially would reuse the already-updated alphaX
alphaX /= sumX;
betaX /= sumX;
gammaX /= sumX;
vals.resize(this->GetNumberOfFeatures());
for (unsigned int i = 0; i < this->GetNumberOfFeatures(); i++)
{
float f0 = this->features[i][ triangleX.v[0] ];
float f1 = this->features[i][ triangleX.v[1] ];
float f2 = this->features[i][ triangleX.v[2] ];
vals[i] = (alphaX * f0) + (betaX * f1) + (gammaX * f2);
}
}
/* Prateep */
float GetFeatureValue(point x, int featureIndex)
{
float alphaX, betaX, gammaX;
Face triangleX;
GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
// SHIREEN
if (alphaX < 0.000001f)
alphaX = 0.000001f;
if (betaX < 0.000001f)
betaX = 0.000001f;
if (gammaX < 0.000001f)
gammaX = 0.000001f;
float sumX = alphaX + betaX + gammaX; // fixed sum, so all three divisions use the same denominator
alphaX /= sumX;
betaX /= sumX;
gammaX /= sumX;
// end SHIREEN
// interpolate feature values on triangle face
float f0 = this->features[featureIndex][ triangleX.v[0] ];
float f1 = this->features[featureIndex][ triangleX.v[1] ];
float f2 = this->features[featureIndex][ triangleX.v[2] ];
// SHIREEN
float featureValue = (alphaX * f0) + (betaX * f1) + (gammaX * f2);
// HACK(01/13/2015) : Prateep ( getting -ve featureValue. Incorrect nearest face).
// float featureValue = (f0 + f1 + f2) / 3.0;
// if(featureValue < 0.0f) {
// std::cout << "bug\n";
// }
return featureValue;
}
/* Prateep -- updated Praful */
point GetFeatureDerivative(point p, int fIndex = 0)
{
point dP; dP.clear();
dP[0] = 0.0f; dP[1] = 0.0f; dP[2] = 0.0f;
float alphaP, betaP, gammaP;
Face triangleP;
GetTriangleInfoForPoint(p, triangleP, alphaP, betaP, gammaP);
if (alphaP < 0.000001f)
alphaP = 0.000001f;
if (betaP < 0.000001f)
betaP = 0.000001f;
if (gammaP < 0.000001f)
gammaP = 0.000001f;
float sumP = alphaP + betaP + gammaP; // fixed sum, so all three divisions use the same denominator
alphaP /= sumP;
betaP /= sumP;
gammaP /= sumP;
// compute derivative at 3 vertices (A,B,C)
int A = triangleP.v[0];
int B = triangleP.v[1];
int C = triangleP.v[2];
// // Get derivatives of Barycentric coordinates
// vec fNorm = GetFaceNormal(triangleP);
// float mag = fNorm DOT fNorm;
// mag = std::sqrt(mag);
// fNorm[0] /= mag;
// fNorm[1] /= mag;
// fNorm[2] /= mag;
// float fArea = GetFaceArea(triangleP);
// vec v0 = this->vertices[triangleP.v[0]];
// vec v1 = this->vertices[triangleP.v[1]];
// vec v2 = this->vertices[triangleP.v[2]];
// vec dAlpha = GetGradientBaryCentricCoord(fNorm, v2-v1, fArea);
// vec dBeta = GetGradientBaryCentricCoord(fNorm, v0-v2, fArea);
// vec dGamma = GetGradientBaryCentricCoord(fNorm, v1-v0, fArea);
point dA = ComputeFeatureDerivative(A,fIndex);
point dB = ComputeFeatureDerivative(B,fIndex);
point dC = ComputeFeatureDerivative(C,fIndex);
// float f0 = this->features[fIndex][A];
// float f1 = this->features[fIndex][B];
// float f2 = this->features[fIndex][C];
// interpolate
dP[0] = ( alphaP * dA[0] ) + ( betaP * dB[0] ) + ( gammaP * dC[0] );// + ( dAlpha[0] * f0 ) + ( dBeta[0] * f1 ) + ( dGamma[0] * f2 );
dP[1] = ( alphaP * dA[1] ) + ( betaP * dB[1] ) + ( gammaP * dC[1] );// + ( dAlpha[1] * f0 ) + ( dBeta[1] * f1 ) + ( dGamma[1] * f2 );
dP[2] = ( alphaP * dA[2] ) + ( betaP * dB[2] ) + ( gammaP * dC[2] );// + ( dAlpha[2] * f0 ) + ( dBeta[2] * f1 ) + ( dGamma[2] * f2 );
return dP;
}
void GetFeatureDerivativeValues(point p, std::vector<point> & vals)
{
float alphaP, betaP, gammaP;
Face triangleP;
GetTriangleInfoForPoint(p, triangleP, alphaP, betaP, gammaP);
if (alphaP < 0.000001f)
alphaP = 0.000001f;
if (betaP < 0.000001f)
betaP = 0.000001f;
if (gammaP < 0.000001f)
gammaP = 0.000001f;
float sumP = alphaP + betaP + gammaP; // fixed sum, so all three divisions use the same denominator
alphaP /= sumP;
betaP /= sumP;
gammaP /= sumP;
// compute derivative at 3 vertices (A,B,C)
int A = triangleP.v[0];
int B = triangleP.v[1];
int C = triangleP.v[2];
// // Get derivatives of Barycentric coordinates
// vec fNorm = GetFaceNormal(triangleP);
// float fArea = GetFaceArea(triangleP);
// vec v0 = this->vertices[triangleP.v[0]];
// vec v1 = this->vertices[triangleP.v[1]];
// vec v2 = this->vertices[triangleP.v[2]];
// vec dAlpha = GetGradientBaryCentricCoord(fNorm, v2-v1, fArea);
// vec dBeta = GetGradientBaryCentricCoord(fNorm, v0-v2, fArea);
// vec dGamma = GetGradientBaryCentricCoord(fNorm, v1-v0, fArea);
// compute final derivatives
vals.resize(this->GetNumberOfFeatures());
point dP; dP.clear();
for (unsigned int fIndex = 0; fIndex < this->GetNumberOfFeatures(); fIndex++)
{
point dA = ComputeFeatureDerivative(A,fIndex);
point dB = ComputeFeatureDerivative(B,fIndex);
point dC = ComputeFeatureDerivative(C,fIndex);
// float f0 = this->features[fIndex][A];
// float f1 = this->features[fIndex][B];
// float f2 = this->features[fIndex][C];
dP[0] = ( alphaP * dA[0] ) + ( betaP * dB[0] ) + ( gammaP * dC[0] );// + ( dAlpha[0] * f0 ) + ( dBeta[0] * f1 ) + ( dGamma[0] * f2 );
dP[1] = ( alphaP * dA[1] ) + ( betaP * dB[1] ) + ( gammaP * dC[1] );// + ( dAlpha[1] * f0 ) + ( dBeta[1] * f1 ) + ( dGamma[1] * f2 );
dP[2] = ( alphaP * dA[2] ) + ( betaP * dB[2] ) + ( gammaP * dC[2] );// + ( dAlpha[2] * f0 ) + ( dBeta[2] * f1 ) + ( dGamma[2] * f2 );
vals[fIndex] = dP;
dP.clear();
}
}
/* Praful */
vec GetGradientBaryCentricCoord(vec fNorm, vec edge, float fArea)
{
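// Gradient of the barycentric coordinate opposite 'edge' in a planar triangle:
// grad = (edge x n) / (2 * Area), up to the orientation of 'edge'. It lies in
// the triangle plane, is perpendicular to the edge, and has magnitude
// 1 / (corresponding triangle height).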
vec gradB = edge CROSS fNorm;
gradB[0] /= 2.0*fArea;
gradB[1] /= 2.0*fArea;
gradB[2] /= 2.0*fArea;
return gradB;
}
float GetFaceArea(int fidP)
{
return GetFaceArea(this->faces[fidP]);
}
float GetFaceArea(Face fidP)
{
vec fn = GetFaceNormal(fidP);
float val = fn DOT fn;
val = 0.5*std::sqrt(val);
return val;
}
vec GetFaceNormal(int fidP)
{
return GetFaceNormal(this->faces[fidP]);
}
vec GetFaceNormal(Face fidP)
{
vec v0 = this->vertices[fidP.v[0]];
vec nv0 = this->normals[fidP.v[0]];
vec v1 = this->vertices[fidP.v[1]];
vec v2 = this->vertices[fidP.v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
return facenormal;
}
/* Praful */
point ComputeFeatureDerivative(int v,int nFeature = 0)
{
if (featureGradients.size() > 0)
return featureGradients[nFeature][v];
else
{
point df; df.clear();
df[0] = 0.0f; df[1] = 0.0f; df[2] = 0.0f;
// feature value at v
float valueV = this->features[nFeature][v];
point ptV = this->vertices[v];
// iterate over neighbors of v, averaging one-sided finite differences to approximate the gradient
for (unsigned int n = 0; n < this->neighbors[v].size(); n++)
{
int indexN = this->neighbors[v][n];
float valueN = this->features[nFeature][indexN];
point ptN = this->vertices[indexN];
float valueDiff = valueN - valueV;
point ptDiff = ptN - ptV;
df[0] = df[0] + valueDiff / (ptDiff[0] + 0.0001f);
df[1] = df[1] + valueDiff / (ptDiff[1] + 0.0001f);
df[2] = df[2] + valueDiff / (ptDiff[2] + 0.0001f);
}
df[0] = df[0] / (float) ( this->neighbors[v].size() );
df[1] = df[1] / (float) ( this->neighbors[v].size() );
df[2] = df[2] / (float) ( this->neighbors[v].size() );
return df;
}
}
int GetNumberOfFeatures()
{
return this->features.size();
}
// Debugging printout, controllable by a "verbose"ness parameter
static int verbose;
static void set_verbose(int);
static void (*dprintf_hook)(const char *);
static void set_dprintf_hook(void (*hook)(const char *));
static void dprintf(const char *format, ...);
// Same as above, but fatal-error printout
static void (*eprintf_hook)(const char *);
static void set_eprintf_hook(void (*hook)(const char *));
static void eprintf(const char *format, ...);
// Constructor
TriMesh() : grid_width(-1), grid_height(-1), flag_curr(0), speedType(ONE), maxEdgeLength(0.0)
{
//iMap = &geoIndex;
//dMap = &geoMap;
kd = NULL;
}
virtual ~TriMesh(){}
};
inline const TriMesh::BBox operator + (const TriMesh::BBox &b, const point &p)
{
return TriMesh::BBox(b) += p;
}
inline const TriMesh::BBox operator + (const point &p, const TriMesh::BBox &b)
{
return TriMesh::BBox(b) += p;
}
inline const TriMesh::BBox operator + (const TriMesh::BBox &b1, const TriMesh::BBox &b2)
{
return TriMesh::BBox(b1) += b2;
}
#endif
|
GB_unop__ainv_uint64_uint64.c
|
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_uint64_uint64
// op(A') function: GB_unop_tran__ainv_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__ainv_uint64_uint64
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__ainv_uint64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
close_enter_exit.c
|
// RUN: %libomptarget-compile-run-and-check-generic
// REQUIRES: unified_shared_memory
// UNSUPPORTED: clang-6, clang-7, clang-8, clang-9
// Fails on amdgcn with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
#include <omp.h>
#include <stdio.h>
#pragma omp requires unified_shared_memory
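// Under unified_shared_memory, a plain map may simply reuse the host
// allocation; the 'close' map-type modifier requests a discrete copy in
// device memory instead.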
#define N 1024
int main(int argc, char *argv[]) {
int fails;
void *host_alloc = 0, *device_alloc = 0;
int *a = (int *)malloc(N * sizeof(int));
int dev = omp_get_default_device();
// Init
for (int i = 0; i < N; ++i) {
a[i] = 10;
}
host_alloc = &a[0];
//
// map + target no close
//
#pragma omp target data map(tofrom : a[ : N]) map(tofrom : device_alloc)
{
#pragma omp target map(tofrom : device_alloc)
{ device_alloc = &a[0]; }
}
// CHECK: a used from unified memory.
if (device_alloc == host_alloc)
printf("a used from unified memory.\n");
//
// map + target with close
//
device_alloc = 0;
#pragma omp target data map(close, tofrom : a[ : N]) map(tofrom : device_alloc)
{
#pragma omp target map(tofrom : device_alloc)
{ device_alloc = &a[0]; }
}
// CHECK: a copied to device.
if (device_alloc != host_alloc)
printf("a copied to device.\n");
//
// map + use_device_ptr no close
//
device_alloc = 0;
#pragma omp target data map(tofrom : a[ : N]) use_device_ptr(a)
{ device_alloc = &a[0]; }
// CHECK: a used from unified memory with use_device_ptr.
if (device_alloc == host_alloc)
printf("a used from unified memory with use_device_ptr.\n");
//
// map + use_device_ptr close
//
device_alloc = 0;
#pragma omp target data map(close, tofrom : a[ : N]) use_device_ptr(a)
{ device_alloc = &a[0]; }
// CHECK: a used from device memory with use_device_ptr.
if (device_alloc != host_alloc)
printf("a used from device memory with use_device_ptr.\n");
//
// map enter/exit + close
//
device_alloc = 0;
#pragma omp target enter data map(close, to : a[ : N])
#pragma omp target map(from : device_alloc)
{
device_alloc = &a[0];
a[0] = 99;
}
// 'close' is missing, so the runtime must check whether a is actually in
// shared memory in order to determine whether to transfer data and delete
// the allocation.
#pragma omp target exit data map(from : a[ : N])
// CHECK: a has been mapped to the device.
if (device_alloc != host_alloc)
printf("a has been mapped to the device.\n");
// CHECK: a[0]=99
// CHECK: a is present: 0
printf("a[0]=%d\n", a[0]);
printf("a is present: %d\n", omp_target_is_present(a, dev));
free(a);
// CHECK: Done!
printf("Done!\n");
return 0;
}
|
examen_static.c
|
#include <omp.h>
#include <stdio.h>
int main ()
{
int iam=0,np=1,i=0;
double start = omp_get_wtime();
#pragma omp parallel private(iam,np,i)
{
#if defined (_OPENMP)
np = omp_get_num_threads();
iam = omp_get_thread_num();
#endif
printf("Hello from thread %d out of %d \n",iam,np);
#pragma omp for schedule(static)
for(i=0;i<(np*2);i++)
{
printf("Thread %d,contador %d\n",iam,i);
}
}
double end = omp_get_wtime();
printf("start time = %f\n",start);
printf("end time = %f\n",end);
printf("diff time = %f\n",end - start);
}
|
2mm.c
|
/**
* 2mm.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <[email protected]>
* Rafael Cardoso F Sousa <[email protected]>
* Luís Felipe Mattos <[email protected]>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <omp.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05
#define GPU 1
/* Problem size. */
# define NI 1024
# define NJ 1024
# define NK 1024
# define NL 1024
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NL; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
}
void compareResults(DATA_TYPE *E, DATA_TYPE *E_GPU)
{
int i,j,fail;
fail = 0;
for (i=0; i < NL; i++)
{
for (j=0; j < NI; j++)
{
if (percentDiff(E[i*NI + j], E_GPU[i*NI + j]) > ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf(">>\n Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f%s: %d\n", ERROR_THRESHOLD, "%", fail);
}
void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
int i, j, k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
D[i*NJ + j] = 0.0;
for (k = 0; k < NK; ++k)
{
D[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
}
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
E[i*NL + j] = 0.0;
for (k = 0; k < NJ; ++k)
{
E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
}
}
}
}
void mm2_OMP(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
int i, j, k;
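// D stays resident on the device across both kernels via the enclosing target
// data region (and is copied back when that region ends); A, B and C are
// copied in per kernel, and E is copied back by its own map(from:) clause.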
#pragma omp target data device (GPU) map(from: D[:NJ*NL])
{
#pragma omp target map(to: A[:NI*NK], B[:NK*NJ])
// j and k must be privatized; otherwise the threads race on the shared scalars
#pragma omp parallel for private(j, k)
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
D[i*NJ + j] = 0.0;
for (k = 0; k < NK; ++k)
{
D[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
}
}
}
#pragma omp target map(to: C[:NI*NJ]) map(from: E[:NI*NL])
#pragma omp parallel for private(j, k)
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
E[i*NL + j] = 0.0;
for (k = 0; k < NJ; ++k)
{
E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
}
}
}
}
}
int main(int argc, char** argv)
{
double t_start, t_end, t_start_GPU, t_end_GPU;
DATA_TYPE* C;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* E_GPU;
C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
E_GPU = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
fprintf(stdout, "<< Linear Algebra: 2 Matrices Multiplications (D=A.B; E=C.D) >>\n");
fprintf(stdout, "<< Size of Matrices: 1024 * 1024 >>\n");
fprintf(stdout, "<< Computation: >>\n\n");
fprintf(stdout, " for (i = 0; i < 1024; i++) \n");
fprintf(stdout, " for (j = 0; j < 1024; j++) { \n");
fprintf(stdout, " D[i][j] = 0.0; \n");
fprintf(stdout, " for (k = 0; k < 1024; ++k) \n");
fprintf(stdout, " D[i][j] += A[i][k] * B[k][j]; \n");
fprintf(stdout, " } \n\n");
fprintf(stdout, " for (i = 0; i < 1024; i++) \n");
fprintf(stdout, " for (j = 0; j < 1024; j++) { \n");
fprintf(stdout, " E[i][j] = 0.0; \n");
fprintf(stdout, " for (k = 0; k < 1024; ++k) \n");
fprintf(stdout, " E[i][j] += C[i][k] * D[k][j]; \n");
fprintf(stdout, " } \n\n");
fprintf(stdout, "<< Initializing Matrices A, B & C ... ");
init_array(A, B, C);
fprintf(stdout, ">>\n<< Start computation of matrices D & E on CPU...>>\n");
t_start = rtclock();
mm2_cpu(A, B, C, D, E);
t_end = rtclock();
fprintf(stdout, " CPU Runtime: %0.6lfs\n", t_end - t_start);
fprintf(stdout, "<< Start computation of matrices D & E on GPU...>>\n");
t_start_GPU = rtclock();
mm2_OMP(A, B, C, D, E_GPU);
t_end_GPU = rtclock();
fprintf(stdout, " GPU Runtime: %0.6lfs\n", t_end_GPU - t_start_GPU);
fprintf(stdout, "<< Comparing Results ... ");
compareResults(E, E_GPU);
free(C);
free(A);
free(B);
free(D);
free(E);
free(E_GPU);
return 0;
}
|
graph.h
|
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <cinttypes>
#include <iostream>
#include <type_traits>
#include <algorithm>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
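/*
Usage sketch (illustrative only; in the benchmarks a Builder constructs the
graph, and 'process_edge' here is a hypothetical callback):
CSRGraph<int32_t> g = builder.MakeGraph();
for (int32_t u : g.vertices())
for (int32_t v : g.out_neigh(u)) // contiguous CSR slice for u
process_edge(u, v);
For a weighted graph, instantiate CSRGraph<NodeID, NodeWeight<NodeID, WeightT>>;
each neighbor then exposes .v (target node) and .w (weight).
*/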
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
NodeID_ v;
WeightT_ w;
NodeWeight() {}
NodeWeight(NodeID_ v) : v(v), w(1) {}
NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
bool operator< (const NodeWeight& rhs) const {
return v == rhs.v ? w < rhs.w : v < rhs.v;
}
// doesn't check WeightT_s, needed to remove duplicate edges
bool operator== (const NodeWeight& rhs) const {
return v == rhs.v;
}
// doesn't check WeightT_s, needed to remove self edges
bool operator== (const NodeID_& rhs) const {
return v == rhs;
}
operator NodeID_() {
return v;
}
};
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
const NodeWeight<NodeID_, WeightT_>& nw) {
os << nw.v << " " << nw.w;
return os;
}
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w;
return is;
}
// Syntactic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u;
DstT v;
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
public:
Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_]; }
iterator end() { return g_index_[n_+1]; }
};
void ReleaseResources() {
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr)
delete[] in_index_;
if (in_neighbors_ != nullptr)
delete[] in_neighbors_;
}
}
public:
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
if (out_index_ != nullptr)
num_edges_ = out_index_[num_nodes_] - out_index_[0];
else
num_edges_ = in_index_[num_nodes_] - in_index_[0];
}
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
void setGraphProperties(int64_t nodes, int64_t edges, bool isDirected)
{
num_nodes_ = nodes;
num_edges_ = edges;
directed_ = isDirected;
}
void setGraphDatastructures(DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs)
{
out_index_ = out_index;
out_neighbors_ = out_neighs;
in_index_ = in_index;
in_neighbors_ = in_neighs;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n) const {
return Neighborhood(n, out_index_);
}
// Is m a neighbor of n?
bool isNeighbor(NodeID_ n, NodeID_ m) const {
return std::binary_search(out_index_[n], out_index_[n+1], m);
}
Neighborhood in_neigh(NodeID_ n) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_);
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
#if 0
static DestID_** relabelIndex(const pvector<SGOffset> &offsets, DestID_* neighs, std::map<NodeID_, int64_t> reMap) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
#endif
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
DestID_** returnOffsetsArray()
{
//PageRank specific
return in_index_;
}
DestID_* returnCoordsArray()
{
//PageRank specific
return in_neighbors_;
}
DestID_** out_index()
{
return out_index_;
}
DestID_* out_neighbors()
{
return out_neighbors_;
}
DestID_** in_index()
{
return in_index_;
}
DestID_* in_neighbors()
{
return in_neighbors_;
}
private:
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
};
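// Illustrative usage sketch (not part of the original header; assumes the
// CSRGraph template parameters NodeID_/DestID_/MakeInverse declared earlier
// in this file). It hand-builds the CSR layout that GenIndex() normally
// produces, for an undirected triangle, then walks every edge.
#if 0
inline void CSRGraphExample() {
  typedef int32_t NID;
  // Neighbor lists of vertices 0, 1, 2 stored back-to-back; each undirected
  // edge appears twice, so the constructor computes num_edges_ = 6/2 = 3.
  NID* neighs = new NID[6]{1, 2, 0, 2, 0, 1};
  NID** index = new NID*[4]{neighs, neighs + 2, neighs + 4, neighs + 6};
  CSRGraph<NID> g(3, index, neighs);   // destructor is expected to free both
  for (NID u : g.vertices())
    for (NID v : g.out_neigh(u)) {
      (void) v;                        // visit edge (u, v)
    }
}
#endif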
#endif // GRAPH_H_
|
quantize.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods take a 24-bit image and reduce
% the number of colors so it can be displayed on a raster device with
% fewer bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinates of two opposite vertices (the
% vertex nearest the origin in RGB space and the vertex farthest from the
% origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal is
% to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node
% to allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
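/*
  Illustrative sketch (not part of the original source): the node-count
  formula quoted above, 1 + sum(i=1, k, 8^i), evaluated directly. For
  k = 8 it yields 19,173,961, the figure cited for a complete tree.
*/
#if 0
static size_t FullTreeNodeCount(const unsigned int k)
{
  size_t
    level = 1,
    nodes = 1;

  unsigned int
    i;

  for (i=1; i <= k; i++)
  {
    level*=8;      /* 8^i nodes at depth i */
    nodes+=level;
  }
  return(nodes);   /* k = 8 => 19173961 */
}
#endif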
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
  Typedef declarations.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
double
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
double
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
double
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither_method=image_info->dither == MagickFalse ?
NoDitherMethod : RiemersmaDitherMethod;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
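/*
  Illustrative usage sketch (not part of the original source; assumes a
  valid Image *image and ExceptionInfo *exception supplied by the caller):
  the typical acquire/configure/quantize/destroy sequence for QuantizeInfo.
*/
#if 0
static void ExampleQuantizeTo256(Image *image,ExceptionInfo *exception)
{
  QuantizeInfo
    *quantize_info;

  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=256;
  (void) QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
}
#endif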
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(GetPixelAlpha(image,pixel) == OpaqueAlpha))
{
alpha_pixel->red=(double) GetPixelRed(image,pixel);
alpha_pixel->green=(double) GetPixelGreen(image,pixel);
alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
return;
}
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
alpha_pixel->red=alpha*GetPixelRed(image,pixel);
alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
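/*
  Both helpers premultiply the color channels by the normalized alpha
  (QuantumScale*alpha) when alpha association is enabled, so nearly
  transparent pixels collapse toward black and contribute proportionally
  less to the color statistics gathered in the tree.
*/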
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->alpha == OpaqueAlpha))
{
alpha_pixel->red=(double) pixel->red;
alpha_pixel->green=(double) pixel->green;
alpha_pixel->blue=(double) pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
return;
}
alpha=(double) (QuantumScale*pixel->alpha);
alpha_pixel->red=alpha*pixel->red;
alpha_pixel->green=alpha*pixel->green;
alpha_pixel->blue=alpha*pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
return(id);
}
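/*
  Worked example (not in the original): ColorToNodeId() packs one bit per
  channel into a child index. At the top of the tree (index == 7) a pixel
  with red >= 128, green < 128, blue >= 128 maps to
  id = 1 | (0 << 1) | (1 << 2) = 5, the red+blue octant; with alpha
  association a fourth bit selects among 16 children instead of 8.
*/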
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace,exception);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue :
MagickFalse;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
double
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace,exception);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
midpoint.red=(double) QuantumRange/2.0;
midpoint.green=(double) QuantumRange/2.0;
midpoint.blue=(double) QuantumRange/2.0;
midpoint.alpha=(double) QuantumRange/2.0;
error.alpha=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance))
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
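  /*
    The first pass above classifies at full MaxTreeDepth until the color
    count exceeds maximum_colors (or the image is exhausted); any rows that
    remain are classified below at the reduced cube_info->depth reached
    after pruning.
  */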
for (y++; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance) != MagickFalse)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
double
pixel;
register double
alpha,
beta,
distance;
register DoublePixelPacket
*magick_restrict q;
register PixelInfo
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(double) (QuantumScale*p->alpha);
beta=(double) (QuantumScale*q->alpha);
}
pixel=alpha*p->red-beta*q->red;
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->green-beta*q->green;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->blue-beta*q->blue;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=p->alpha-q->alpha;
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
ExceptionInfo *exception)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the
% number of colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
(void) DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
register double
alpha;
register PixelInfo
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(double) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
q->red=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.blue);
q->alpha=(double) OpaqueAlpha;
}
else
{
double
opacity;
opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
q->alpha=(double) ClampToQuantum(opacity);
if (q->alpha == OpaqueAlpha)
{
q->red=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.blue);
}
else
{
double
gamma;
gamma=(double) (QuantumScale*q->alpha);
gamma=PerceptibleReciprocal(gamma);
q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.blue);
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
register Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels, using
% serpentine-scan Floyd-Steinberg error diffusion or Riemersma dithering
% along a Hilbert curve. DitherImage returns MagickTrue if the image is
% dithered, otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
register ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
ssize_t
offset;
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
return(offset);
}
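/*
  Worked example (not in the original): with CacheShift 2 each 8-bit
  channel keeps its top 6 bits, so an opaque color (r,g,b) = (200,100,50)
  packs into offset = (200>>2) | ((100>>2)<<6) | ((50>>2)<<12) =
  50 | 1600 | 49152 = 50802, and the cache of 1UL << (4*(8-CacheShift))
  slots allocated in GetCubeInfo() below has room for every such
  quantized RGBA color.
*/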
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
const char
*artifact;
double
amount;
DoublePixelPacket
**pixels;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
status=MagickTrue;
amount=1.0;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
amount=StringToDoubleInterval(artifact,1.0);
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
register Quantum
*magick_restrict q;
register ssize_t
x;
size_t
index;
ssize_t
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
    current=pixels[id]+(y & 0x01)*image->columns;  /* errors for this row */
    previous=pixels[id]+((y+1) & 0x01)*image->columns;  /* prior row's errors */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);  /* serpentine scan direction */
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
register ssize_t
i;
ssize_t
u;
u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
if (x > 0)
{
pixel.red+=7.0*amount*current[u-v].red/16;
pixel.green+=7.0*amount*current[u-v].green/16;
pixel.blue+=7.0*amount*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=7.0*amount*current[u-v].alpha/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=previous[u+v].red/16;
pixel.green+=previous[u+v].green/16;
pixel.blue+=previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=previous[u+v].alpha/16;
}
pixel.red+=5.0*amount*previous[u].red/16;
pixel.green+=5.0*amount*previous[u].green/16;
pixel.blue+=5.0*amount*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=5.0*amount*previous[u].alpha/16;
if (x > 0)
{
pixel.red+=3.0*amount*previous[u-v].red/16;
pixel.green+=3.0*amount*previous[u-v].green/16;
pixel.blue+=3.0*amount*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
}
}
pixel.red=(double) ClampPixel(pixel.red);
pixel.green=(double) ClampPixel(pixel.green);
pixel.blue=(double) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.alpha=(double) ClampPixel(pixel.alpha);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
node_id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
node_id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[node_id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[node_id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
q+u*GetPixelChannels(image));
SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
q+u*GetPixelChannels(image));
SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
q+u*GetPixelChannels(image));
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
q+u*GetPixelChannels(image));
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].alpha=pixel.alpha-color.alpha;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
if (level == 1)
switch (direction)
{
case WestGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
break;
}
case EastGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
break;
}
case NorthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
break;
}
case SouthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
break;
}
case EastGravity:
{
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
break;
}
case NorthGravity:
{
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
break;
}
case SouthGravity:
{
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
break;
}
default:
break;
}
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
register CubeInfo
*p;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
register Quantum
*magick_restrict q;
register ssize_t
i;
/*
Distribute error.
*/
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
AssociateAlphaPixel(image,cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=p->weights[i]*p->error[i].red;
pixel.green+=p->weights[i]*p->error[i].green;
pixel.blue+=p->weights[i]*p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.alpha+=p->weights[i]*p->error[i].alpha;
}
pixel.red=(double) ClampPixel(pixel.red);
pixel.green=(double) ClampPixel(pixel.green);
pixel.blue=(double) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.alpha=(double) ClampPixel(pixel.alpha);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
if (cube_info->associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
depth;
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info,exception));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
  /*
    Choose a Hilbert-curve order deep enough that a 2^depth by 2^depth grid
    covers the image.
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,
      (ssize_t) image->rows))
    depth++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
if (depth > 1)
Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
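  /* memset() with -1 writes 0xFF into every byte; in two's complement each
     ssize_t cache slot therefore reads back as -1, i.e. "empty". */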
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
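  /*
    Each pass multiplies weight by (QuantumRange+1)^(1/(ErrorQueueLength-1)),
    so the stored weights decay geometrically from 1 for the most recent
    error entry down to 1/(QuantumRange+1) for the oldest, before the
    normalization below.
  */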
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
%    o node_info: GetNodeInfo() returns a pointer to the newly allocated
%      node.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the color cube tree at which the node
%      resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
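%  In terms of the computation below, with d ranging over the per-channel
%  differences: mean_error_per_pixel = sum(|d|)/(3*columns*rows) and
%  normalized_mean_square_error = sum(d^2)/(3*columns*rows*QuantumRange^2).
%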
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
alpha,
area,
beta,
distance,
maximum_error,
mean_error,
mean_error_per_pixel;
ssize_t
index,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) GetPixelIndex(image,p);
if (image->alpha_trait == BlendPixelTrait)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
beta=(double) (QuantumScale*image->colormap[index].alpha);
}
distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
mean_error/area;
image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
    Round the value to the nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
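  /*
    Worked example: with a 16-bit Quantum (QuantumRange 65535) and levels=4,
    a channel value of 30000 maps to MagickRound(30000/65535.0*3) = 1, then
    to 65535*1/3 = 21845; each channel thus snaps to one of 4 evenly spaced
    values.
  */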
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double)
PosterizePixel(image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double)
PosterizePixel(image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double)
PosterizePixel(image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double)
PosterizePixel(image->colormap[i].alpha);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait == BlendPixelTrait))
SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither_method=dither_method;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
%  The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.alpha+=node_info->total_color.alpha;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->alpha_trait != BlendPixelTrait)
{
if (SetImageGray(image,exception) != MagickFalse)
(void) SetGrayscaleImage(image,exception);
}
if ((quantize_info->dither_method == NoDitherMethod) &&
(image->storage_class == PseudoClass) &&
(image->colors <= maximum_colors))
{
if ((quantize_info->colorspace != UndefinedColorspace) &&
(quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace(image,quantize_info->colorspace,
exception);
return(MagickTrue);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
depth--;
if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
depth--;
if (SetImageGray(image,exception) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image if it contains more than the
        maximum; otherwise, disable dithering to improve performance.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
else
cube_info->quantize_info->dither_method=NoDitherMethod;
status=AssignImageColors(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
register ssize_t
i;
size_t
depth,
maximum_colors,
number_images;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images,exception);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither_method != NoDitherMethod)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info,exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a 1D array, which the caller then sorts.  This
%  accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image.  At the
%  beginning of reduction, n2 = 0 for all nodes except the leaves of
%  the tree, which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
double
*p,
*q;
p=(double *) error_p;
q=(double *) error_q;
if (*p > *q)
return(1);
if (fabs(*q-*p) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
double
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (double *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(double),
QuantizeErrorCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(double *) RelinquishMagickMemory(quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,exception);
if (status != MagickFalse)
{
/*
Classify image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images,exception);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,exception);
if (status != MagickFalse)
{
/*
Classify image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info,exception);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
PixelInfo
*color_1,
*color_2;
ssize_t
intensity;
color_1=(PixelInfo *) x;
color_2=(PixelInfo *) y;
intensity=(ssize_t) (GetPixelInfoIntensity((const Image *) NULL,color_1)-
GetPixelInfoIntensity((const Image *) NULL,color_2));
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
register ssize_t
i;
size_t
extent;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
spmm.h
|
// ------------------------------------------------------------------------
// File: spmm.h
// S-BLAS: A Scalable Sparse-BLAS Kernel Library for Multi-GPUs.
// This file implements the Sparse-Matrix-Dense-Matrix multiplication (SPMM).
// ------------------------------------------------------------------------
// Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// Other PNNL Developers: Chenhao Xie, Jieyang Chen, Jiajia Li, Jesun Firoz
// and Linghao Song
// GitHub repo: http://www.github.com/uuudown/S-BLAS
// PNNL-IPID: 31803-E, IR: PNNL-31803
// MIT License.
// ------------------------------------------------------------------------
#ifndef SPMM_H
#define SPMM_H
#include <iostream>
#include <assert.h>
#include <omp.h>
#include <cusparse.h>
#include <nccl.h>
#include "utility.h"
#include "matrix.h"
using namespace std;
//========================== CPU baseline version ============================
template <typename IdxType, typename DataType>
void sblas_spmm_csr_cpu(
CsrSparseMatrix<IdxType,DataType>* pA,
DenseMatrix<IdxType,DataType>* pB,
DenseMatrix<IdxType,DataType>* pC,
DataType alpha,
DataType beta)
{
assert((pA->width) == (pB->height));
assert((pA->height) == (pC->height));
assert((pB->width) == (pC->width));
if (pB->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_CPU: B should be in column major!" << endl;
exit(-1);
}
if (pC->order == row_major)
{
for (IdxType i=0; i<pA->height; i++)
{
for(IdxType n=0; n<pB->width; n++)
{
DataType sum = 0;
for (IdxType j=pA->csrRowPtr[i]; j<pA->csrRowPtr[i+1]; j++)
{
IdxType col_A = pA->csrColIdx[j];
DataType val_A = pA->csrVal[j];
DataType val_B = pB->val[n*(pB->height)+col_A];
sum += val_A * val_B;
}
        pC->val[i*(pC->width)+n] = beta*(pC->val[i*(pC->width)+n]) + alpha*sum;
}
}
}
else
{
for (IdxType i=0; i<pA->height; i++)
{
for(IdxType n=0; n<pB->width; n++)
{
DataType sum = 0;
for (IdxType j=pA->csrRowPtr[i]; j<pA->csrRowPtr[i+1]; j++)
{
IdxType col_A = pA->csrColIdx[j];
DataType val_A = pA->csrVal[j];
DataType val_B = pB->val[n*(pB->height)+col_A];
sum += val_A * val_B;
}
pC->val[n*(pC->height)+i] = beta*(pC->val[n*(pC->height)+i]) + alpha*sum;
}
}
}
}
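// Worked example for the CSR traversal above: A = [[1,0],[2,3]] is stored as
// csrRowPtr = {0,1,3}, csrColIdx = {0,0,1}, csrVal = {1,2,3}. With B the 2x2
// identity in column-major order (val = {1,0,0,1}), alpha = 1 and beta = 0,
// the inner loop reproduces A in pC->val.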
/** Compute Sparse-Matrix-Dense-Matrix Multiplication using multiple GPUs.
* Since A and B are allocated on unified memory, there is no need for memcpy.
 * The idea is to reuse A on each GPU and partition B; then each GPU calls
* cuSparse single-GPU spMM to compute its own share. For this method, there is
* no explicit inter-GPU communication required.
* --------- C = A * B -----------
* A[m*k] in CSR sparse format
* B[k*n] in column major dense format
* C[m*n] in column major dense format
*/
template <typename IdxType, typename DataType>
void sblas_spmm_csr_v1(
CsrSparseMatrix<IdxType,DataType>* pA,
DenseMatrix<IdxType,DataType>* pB,
DenseMatrix<IdxType,DataType>* pC,
DataType alpha,
DataType beta,
unsigned n_gpu)
{
assert((pA->width) == (pB->height));
assert((pA->height) == (pC->height));
assert((pB->width) == (pC->width) );
if (pB->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V1: B should be in column major!" << endl;
exit(-1);
}
if (pC->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V1: C should be in column major!" << endl;
exit(-1);
}
//Start OpenMP
#pragma omp parallel num_threads (n_gpu)
{
int i_gpu = omp_get_thread_num();
CUDA_SAFE_CALL( cudaSetDevice(i_gpu) );
cusparseHandle_t handle;
cusparseMatDescr_t mat_A;
cusparseStatus_t cusparse_status;
CHECK_CUSPARSE( cusparseCreate(&handle) );
CHECK_CUSPARSE( cusparseCreateMatDescr(&mat_A) );
CHECK_CUSPARSE( cusparseSetMatType(mat_A, CUSPARSE_MATRIX_TYPE_GENERAL) );
CHECK_CUSPARSE( cusparseSetMatIndexBase(mat_A, CUSPARSE_INDEX_BASE_ZERO) );
printf("gpu-%d m:%d,n:%ld,k:%d\n",i_gpu, pA->height, pB->get_dim_gpu_num(i_gpu), pA->width);
CHECK_CUSPARSE( cusparseDcsrmm(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
pA->height,
pB->get_dim_gpu_num(i_gpu),
pA->width,
pA->nnz,
&alpha,
mat_A,
pA->csrVal_gpu[i_gpu],
pA->csrRowPtr_gpu[i_gpu],
pA->csrColIdx_gpu[i_gpu],
pB->val_gpu[i_gpu],
pA->width,
&beta,
pC->val_gpu[i_gpu],
pC->height) );
pC->sync2cpu(i_gpu);
#pragma omp barrier
CHECK_CUSPARSE( cusparseDestroyMatDescr(mat_A) );
CHECK_CUSPARSE( cusparseDestroy(handle) );
} //end of omp
}
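/* Usage sketch (editorial). Only the sblas_spmm_csr_v1 call below matches
   this header; the loader and constructors are assumptions about the
   companion matrix.h API, and the file name is hypothetical:
     CsrSparseMatrix<int, double> A("A.mtx");            // assumed CSR loader
     DenseMatrix<int, double> B(A.width, 64, col_major); // assumed ctor
     DenseMatrix<int, double> C(A.height, 64, col_major);// assumed ctor
     sblas_spmm_csr_v1<int, double>(&A, &B, &C, 1.0, 0.0, 4); // C = A*B on 4 GPUs
*/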
template <typename IdxType, typename DataType>
void sblas_spmm_csr_v2(
CsrSparseMatrix<IdxType,DataType>* pA,
DenseMatrix<IdxType,DataType>* pB,
DenseMatrix<IdxType,DataType>* pC,
DataType alpha,
DataType beta,
unsigned n_gpu)
{
assert((pA->width) == (pB->height));
assert((pA->height) == (pC->height));
assert((pB->width) == (pC->width) );
if (pB->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V2: B should be in column major!" << endl;
exit(-1);
}
if (pC->order == row_major)
{
cerr << "SBLAS_SPMM_CSR_V2: C should be in col major!" << endl;
exit(-1);
}
ncclUniqueId id;
ncclGetUniqueId(&id);
ncclComm_t comm[n_gpu];
DenseMatrix<IdxType,DataType> C_copy(pC->height, pC->width, 0., row_major);
C_copy.sync2gpu(n_gpu, replicate);
//Start OpenMP
#pragma omp parallel num_threads (n_gpu) shared (comm, id)
{
int i_gpu = omp_get_thread_num();
CUDA_SAFE_CALL( cudaSetDevice(i_gpu) );
CHECK_NCCL(ncclCommInitRank(&comm[i_gpu], n_gpu, id, i_gpu));
cusparseHandle_t handle;
cusparseMatDescr_t mat_A;
cusparseStatus_t cusparse_status;
CHECK_CUSPARSE( cusparseCreate(&handle) );
CHECK_CUSPARSE( cusparseCreateMatDescr(&mat_A) );
CHECK_CUSPARSE( cusparseSetMatType(mat_A, CUSPARSE_MATRIX_TYPE_GENERAL) );
CHECK_CUSPARSE( cusparseSetMatIndexBase(mat_A, CUSPARSE_INDEX_BASE_ZERO) );
DataType dummy_alpha = 1.0;
DataType dummy_beta = 1.0;
CHECK_CUSPARSE( cusparseDcsrmm(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
pA->get_gpu_row_ptr_num(i_gpu)-1,
pB->width,
pA->width,
pA->nnz_gpu[i_gpu],
&dummy_alpha,
mat_A,
pA->csrVal_gpu[i_gpu],
pA->csrRowPtr_gpu[i_gpu],
pA->csrColIdx_gpu[i_gpu],
pB->val_gpu[i_gpu],
pB->height,
&dummy_beta,
/*C_copy.val_gpu[i_gpu],*/
&(C_copy.val_gpu[i_gpu])[(pA->starting_row_gpu[i_gpu])],
/*&(C_copy.val_gpu[i_gpu])[(pA->starting_row_gpu[i_gpu])*(pB->width)],*/
/*C_copy.val_gpu[i_gpu] + 1,*/
//pC->width) );
pC->height) );
#pragma omp barrier
CUDA_SAFE_CALL(cudaThreadSynchronize());
#pragma omp barrier
gpu_timer nccl_timer;
nccl_timer.start_timer();
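    // In-place all-reduce: the same pointer is passed as send and receive
    // buffer, so each GPU's partial C block is summed across all ranks and
    // every replica ends up with the full C.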
CHECK_NCCL( ncclAllReduce(C_copy.val_gpu[i_gpu], C_copy.val_gpu[i_gpu],
C_copy.get_mtx_num(), ncclDouble, ncclSum, comm[i_gpu], 0) );
CUDA_SAFE_CALL(cudaThreadSynchronize());
#pragma omp barrier
nccl_timer.stop_timer();
cout << "GPU-" << i_gpu << " NCCL Time: " << nccl_timer.measure() << "ms." << endl;
CHECK_CUSPARSE( cusparseDestroyMatDescr(mat_A) );
CHECK_CUSPARSE( cusparseDestroy(handle) );
CHECK_NCCL(ncclCommDestroy(comm[i_gpu]));
}
CUDA_CHECK_ERROR();
pC->plusDenseMatrixGPU(C_copy, alpha, beta);
}
#endif
|
dataset.h
|
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
 * \brief This class is used to store some meta (non-feature) data for training data,
 * e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
 * 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
 *    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
 * 4. Query Weights, calculated automatically from weights and query_boundaries (if both exist);
 *    the weight for the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1]-1]) / (query_boundaries[i+1] - query_boundaries[i]).
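 *    e.g. query_boundaries = {0, 3, 5}: query 0 covers records [0, 3) and
 *    its weight is (weights[0] + weights[1] + weights[2]) / 3.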
 * 5. Initial score, optional. If present, the model will boost from this score; otherwise it starts from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
   * \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of data
*/
void Init(const char* data_filename);
/*!
   * \brief Initialize as a subset of an existing Metadata object
   * \param metadata The full Metadata object to take the subset from
   * \param used_indices Indices of the used records
   * \param num_used_indices Number of used indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
   * \brief Initialize from binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
   * \brief Initial work; allocates space for label, weight (if it exists) and query (if it exists)
   * \param num_data Number of training data
   * \param weight_idx Index of the weight column, < 0 means it doesn't exist
   * \param query_idx Index of the query id column, < 0 means it doesn't exist
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indices of local used
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
   * \brief Partition meta data according to local used indices if needed
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const label_t* label, data_size_t len);
void SetWeights(const label_t* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
   * \param writer Writer for the file to write to
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value) {
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value) {
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value) {
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
   * \brief Get weights; if they do not exist, returns nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
   * \brief Get data boundaries on queries; if they do not exist, returns nullptr.
   * We assume the data are ordered by query;
   * the interval [query_boundaries[i], query_boundaries[i+1])
   * holds the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
   * \brief Get weights for queries; if they do not exist, returns nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
   * \brief Get initial scores; if they do not exist, returns nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore();
  /*! \brief Load weights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
  /*! \brief Load query weights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correct weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check that the initial-score file is correct */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
   * \param out_label Label will be stored here if it exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
virtual int NumFeatures() const = 0;
/*!
   * \brief Create a parser object; the format is chosen automatically depending on the file
   * \param filename Filename of the data
   * \param header True if the file has a header line
   * \param num_features Number of features in this data file, if known; <= 0 means unknown
   * \param label_idx Index of the label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
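// Usage sketch (editorial): the file name, flag values and line content are
// hypothetical; only the CreateParser/ParseOneLine signatures come from this
// header.
//   std::unique_ptr<Parser> parser(Parser::CreateParser(
//       "train.csv", /*header=*/true, /*num_features=*/-1, /*label_idx=*/0));
//   std::vector<std::pair<int, double>> features;
//   double label = 0.0;
//   parser->ParseOneLine("1,0.5,3.2", &features, &label);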
struct TrainingShareStates {
int num_threads = 0;
bool is_colwise = true;
bool is_use_subcol = false;
bool is_use_subrow = false;
bool is_subrow_copied = false;
bool is_constant_hessian = true;
const data_size_t* bagging_use_indices;
data_size_t bagging_indices_cnt;
int num_bin_aligned;
std::unique_ptr<MultiValBin> multi_val_bin;
std::unique_ptr<MultiValBin> multi_val_bin_subset;
std::vector<uint32_t> hist_move_src;
std::vector<uint32_t> hist_move_dest;
std::vector<uint32_t> hist_move_size;
std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>
hist_buf;
void SetMultiValBin(MultiValBin* bin) {
if (bin == nullptr) {
return;
}
multi_val_bin.reset(bin);
num_threads = OMP_NUM_THREADS();
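    // Round the bin count up to the next multiple of kAlignedSize so that
    // each thread's slice of hist_buf starts on an aligned boundary.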
num_bin_aligned =
(bin->num_bin() + kAlignedSize - 1) / kAlignedSize * kAlignedSize;
size_t new_size = static_cast<size_t>(num_bin_aligned) * 2 * num_threads;
if (new_size > hist_buf.size()) {
hist_buf.resize(static_cast<size_t>(num_bin_aligned) * 2 * num_threads);
}
}
hist_t* TempBuf() {
if (!is_use_subcol) {
return nullptr;
}
return hist_buf.data() + hist_buf.size() - num_bin_aligned * 2;
}
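  // Scatters histogram chunks from the shared buffer (src) into the
  // caller-visible histogram (dest) using the precomputed hist_move_*
  // tables; a no-op unless a column subset is in use.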
void HistMove(const hist_t* src, hist_t* dest) {
if (!is_use_subcol) {
return;
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(hist_move_src.size()); ++i) {
std::copy_n(src + hist_move_src[i], hist_move_size[i],
dest + hist_move_dest[i]);
}
}
};
/*! \brief The main class of the data set,
 * which is used for training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
int num_total_features,
const std::vector<std::vector<double>>& forced_bins,
int** sample_non_zero_indices,
double** sample_values,
const int* num_per_col,
int num_sample_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
inline void FinishOneRow(int tid, data_size_t row_idx, const std::vector<bool>& is_feature_added) {
if (is_finish_load_) { return; }
for (auto fidx : feature_need_push_zeros_) {
if (is_feature_added[fidx]) { continue; }
const int group = feature2group_[fidx];
const int sub_feature = feature2subfeature_[fidx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, 0.0f);
}
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
std::vector<bool> is_feature_added(num_features_, false);
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
is_feature_added[feature_idx] = true;
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
FinishOneRow(tid, row_idx, is_feature_added);
}
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
void ReSize(data_size_t num_data);
void CopySubrow(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
MultiValBin* GetMultiBinFromSparseFeatures() const;
MultiValBin* GetMultiBinFromAllFeatures() const;
TrainingShareStates* GetShareStates(
score_t* gradients, score_t* hessians,
const std::vector<int8_t>& is_feature_used, bool is_constant_hessian,
bool force_colwise, bool force_rowwise) const;
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void InitTrain(const std::vector<int8_t>& is_feature_used,
TrainingShareStates* share_state) const;
template <bool USE_INDICES, bool USE_HESSIAN>
void ConstructHistogramsInner(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices,
data_size_t num_data, const score_t* gradients,
const score_t* hessians,
score_t* ordered_gradients,
score_t* ordered_hessians,
TrainingShareStates* share_state,
hist_t* hist_data) const;
template <bool USE_INDICES, bool ORDERED>
void ConstructHistogramsMultiVal(const data_size_t* data_indices,
data_size_t num_data,
const score_t* gradients,
const score_t* hessians,
TrainingShareStates* share_state,
hist_t* hist_data) const;
inline void ConstructHistograms(
const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
TrainingShareStates* share_state, hist_t* hist_data) const {
if (num_data <= 0) {
return;
}
bool use_indices = data_indices != nullptr && (num_data < num_data_);
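    // Dispatch to one of four <USE_INDICES, USE_HESSIAN> instantiations so
    // the hot histogram loops are compiled without per-iteration branches.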
if (share_state->is_constant_hessian) {
if (use_indices) {
ConstructHistogramsInner<true, false>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
} else {
ConstructHistogramsInner<false, false>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
}
} else {
if (use_indices) {
ConstructHistogramsInner<true, true>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
} else {
ConstructHistogramsInner<false, true>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
}
}
}
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, hist_t* data) const;
inline data_size_t Split(int feature, const uint32_t* threshold,
int num_threshold, bool default_left,
const data_size_t* data_indices,
data_size_t cnt, data_size_t* lte_indices,
data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(
sub_feature, threshold, num_threshold, default_left, data_indices,
cnt, lte_indices, gt_indices);
}
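  // The first sub-feature of each group is offset by one bin, presumably
  // because bin 0 of the group's bin data is reserved.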
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
inline bool IsMultiGroup(int i) const {
return feature_groups_[i]->is_multi_val_;
}
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
std::unordered_set<std::string> feature_name_set;
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name : feature_names_) {
// check ascii
if (!Common::CheckASCII(feature_name)) {
Log::Fatal("Do not support non-ASCII characters in feature name.");
}
// check json
if (!Common::CheckAllowedJSON(feature_name)) {
Log::Fatal("Do not support special JSON characters in feature name.");
}
if (feature_name.find(' ') != std::string::npos) {
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
if (feature_name_set.count(feature_name) > 0) {
Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
}
feature_name_set.insert(feature_name);
}
if (spaceInFeatureName) {
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; ++i) {
int fidx = used_feature_map_[i];
if (fidx < 0) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info_string());
}
}
return bufs;
}
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
void AddFeaturesFrom(Dataset* other);
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
  /*! \brief token used to recognize the dataset binary file */
static const char* binary_file_token;
int num_groups_;
std::vector<int> real_feature_idx_;
std::vector<int> feature2group_;
std::vector<int> feature2subfeature_;
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
bool is_finish_load_;
int max_bin_;
std::vector<int32_t> max_bin_by_feature_;
std::vector<std::vector<double>> forced_bin_bounds_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
std::vector<int> feature_need_push_zeros_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_H_
|
omp-single-2.c
|
#include <omp.h>
extern void abort (void);
struct X
{
int a;
char b;
int c;
};
int main (void)
{
int i = 0;
struct X x;
int bad = 0;
#pragma omp parallel private (i, x) shared (bad)
{
i = 5;
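    /* copyprivate broadcasts the executing thread's private copies of i and
       x from the single region to all other threads in the team.  */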
#pragma omp single copyprivate (i, x)
{
i++;
x.a = 23;
x.b = 42;
x.c = 26;
}
if (i != 6 || x.a != 23 || x.b != 42 || x.c != 26)
bad = 1;
}
if (bad)
abort ();
return 0;
}
|
3d7pt.c
|
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
  // initialize the domain with reproducible random values; start at index 0
  // so the boundary planes read by the stencil are also defined
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
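  // Jacobi-style time stepping on two buffers: step t reads A[t%2] and
  // writes A[(t+1)%2], so the grids ping-pong and no copy is needed.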
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (disabled: freeing caused performance degradation in timing runs)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
utils.c
|
#include <string.h>
#include <getopt.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "mpi.h"
#include "utils.h"
void check_merr(int e) {
switch(e){
case 22: // EINVAL
printf("alignment error\n");
break;
case 12: //ENOMEM
printf("no sufficient memory\n");
break;
}
}
// set the default values of the kernel parameters
void param_default(Parameters *p) {
p->stencil_shape[0] = 256;
p->stencil_shape[1] = 64;
p->stencil_shape[2] = 64;
#ifdef __MIC__
p->alignment = 16;
#else
p->alignment = 8;
#endif
p->target_ts = 0; //Naive TS
p->target_kernel = 0; //Basic ISO stencil kernel
p->stencil.r = stencil_info_list[p->target_kernel].r;
p->n_tests = 3;
p->nt = 100;
p->verify = 0;
p->source=NULL;
p->verbose = 1;
p->debug = 0;
p->source_point_enabled = 0;
p->array_padding = 1;
p->mwd_type = 0;
p->use_omp_stat_sched = 0;
// diamond method
p->t_dim = -1;
// p->nb_diamond_chunk=1;
p->halo_concat = 1;
p->cache_size = 0;
// get the number of available OpenMP threads
p->num_threads = 1;
#if defined(_OPENMP)
p->num_threads = omp_get_max_threads();
#endif
  // thread counts default to -1 in every dimension and per component (decided later)
p->stencil_ctx.th_x = -1;
p->stencil_ctx.th_y = -1;
p->stencil_ctx.th_z = -1;
p->stencil_ctx.th_c = -1;
p->stencil_ctx.thread_group_size = -1;
//internal affinity variables
p->th_block = 1;
p->th_stride = 1;
p->stencil_ctx.use_manual_cpu_bind=0;
p->wavefront = 1; // default to using wavefront in the tile
p->stencil_ctx.num_wf = -1;
p->stencil_ctx.bs_x = 1e7;
// Topology parameters
p->t.is_periodic[0]=0;
p->t.is_periodic[1]=0;
p->t.is_periodic[2]=0;
p->t.shape[0] = 1;
p->t.shape[1] = 1;
p->t.shape[2] = 1;
p->h[0].is_contiguous = 1;
p->h[1].is_contiguous = 1;
p->h[2].is_contiguous = 1;
p->h[0].size = 0;
p->h[1].size = 0;
p->h[2].size = 0;
p->in_auto_tuning=0;
// Initialize the default stencil coefficients values
real_t coef[] = {-0.28472, 0.16000, -0.02000, 0.00254,
-0.00018, -0.18472, 0.19, -0.0500, 0.00554, -0.0009, 0.00354};
int i;
for(i=0; i<11; i++) p->g_coef[i] = (real_t) coef[i];
// profiling information
reset_timers(&(p->prof));
}
void reset_timers(Profile * p){
p->compute = 0.;
p->communicate = 0.;
p->send_recv = 0.;
p->wait = 0.;
p->total = 0.;
p->others = 0.;
p->ts_main = 0.;
p->ts_others = 0.;
}
void reset_wf_timers(Parameters * p){
int i;
int num_thread_groups = get_ntg(*p);
// reset if the wavefront profiling is allocated
if( (p->wavefront != 0) && (p->target_ts == 2) ) {
for(i=0; i<p->num_threads; i++) p->stencil_ctx.t_wait[i] = 0.0;
for(i=0; i<num_thread_groups; i++) p->stencil_ctx.t_wf_main[i] = 0.0;
for(i=0; i<num_thread_groups; i++) p->stencil_ctx.t_wf_comm[i] = 0.0;
for(i=0; i<num_thread_groups; i++) p->stencil_ctx.t_wf_prologue[i] = 0.0;
for(i=0; i<num_thread_groups; i++) p->stencil_ctx.t_wf_epilogue[i] = 0.0;
for(i=0; i<num_thread_groups; i++) p->stencil_ctx.wf_num_resolved_diamonds[i] = 0.0;
for(i=0; i<num_thread_groups; i++) p->stencil_ctx.t_group_wait[i] = 0.0;
}
}
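// Note: posix_memalign requires the alignment to be a power of two and a
// multiple of sizeof(void*); on failure it returns EINVAL or ENOMEM, which
// check_merr() above translates into messages.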
void arrays_allocate(Parameters *p) {
int male0, male1, male2;
uint64_t coef_size, domain_size;
switch(p->stencil.type){
case REGULAR:
male1 = posix_memalign((void **)&(p->U1), p->alignment, sizeof(real_t)*p->ln_domain); check_merr(male1);
male1 = posix_memalign((void **)&(p->U2), p->alignment, sizeof(real_t)*p->ln_domain); check_merr(male1);
    if(p->stencil.time_order == 2) {
      male1 = posix_memalign((void **)&(p->U3), p->alignment, sizeof(real_t)*p->ln_domain); check_merr(male1);
    }
    // check_merr() must only run when the corresponding allocation was attempted
    if(p->source_point_enabled==1) {
      male0 = posix_memalign((void **)&(p->source), p->alignment, sizeof(real_t)*p->nt); check_merr(male0);
    }
domain_size = 2*p->ln_domain;
break;
case SOLAR:
domain_size = p->ln_domain*12lu*2lu;
male1 = posix_memalign((void **)&(p->U1), p->alignment, sizeof(real_t)*domain_size); check_merr(male1);
p->U2 = 0;
break;
default:
printf("ERROR: unknown type of stencil\n");
exit(1);
break;
}
// allocate the size of the coefficients matrix according to the stencil type
switch(p->stencil.coeff){
    case CONSTANT_COEFFICIENT:
      coef_size = 11; // init_coeff() writes r+1 coefficients and r can be up to 10
      break;
case VARIABLE_COEFFICIENT:
coef_size = p->ln_domain*(1 + p->stencil.r);
break;
case VARIABLE_COEFFICIENT_AXSYM:
coef_size = p->ln_domain*(1 + 3*p->stencil.r);
break;
case VARIABLE_COEFFICIENT_NOSYM:
coef_size = p->ln_domain*(1 + 6*p->stencil.r);
break;
case SOLAR_COEFFICIENT:
coef_size = p->ln_domain*28lu*2lu;
break;
default:
printf("ERROR: unknown type of stencil\n");
exit(1);
break;
}
male2 = posix_memalign((void **)&(p->coef), p->alignment, sizeof(real_t)*coef_size); check_merr(male2);
if (p->verbose == 1){
if( (p->mpi_rank ==0) || (p->mpi_rank == p->mpi_size-1))
printf("[rank=%d] alloc. dom(err=%d):%fGiB coef(err=%d):%fGiB total:%fGiB\n", p->mpi_rank,
male1, sizeof(real_t)*domain_size*1.0/(1024*1024*1024),
male2, sizeof(real_t)*coef_size*1.0/(1024*1024*1024),
sizeof(real_t)*(coef_size+ domain_size)*1.0/(1024*1024*1024));
}
}
void arrays_free(Parameters *p) {
switch(p->stencil.type){
case REGULAR:
free(p->coef);
free(p->U1);
free(p->U2);
if(p->stencil.time_order == 2)
free(p->U3);
if(p->source_point_enabled==1)
free(p->source);
break;
case SOLAR:
free(p->coef);
free(p->U1);
break;
default:
printf("ERROR: unknown type of stencil\n");
exit(1);
break;
}
}
void set_centered_source(Parameters *p) {
p->source_pt[0] = (p->stencil_shape[0]+2*p->stencil.r)/2 -1;
p->source_pt[1] = (p->stencil_shape[1]+2*p->stencil.r)/2 -1;
p->source_pt[2] = (p->stencil_shape[2]+2*p->stencil.r)/2 -1;
}
void set_kernels(Parameters *p){
p->stencil.name = stencil_info_list[p->target_kernel].name;
p->stencil.coeff = stencil_info_list[p->target_kernel].coeff;
p->stencil.r = stencil_info_list[p->target_kernel].r;
p->stencil.nd = stencil_info_list[p->target_kernel].nd;
p->stencil.time_order = stencil_info_list[p->target_kernel].time_order;
p->stencil.shape = stencil_info_list[p->target_kernel].shape;
p->stencil.type = stencil_info_list[p->target_kernel].type;
p->stencil.stat_sched_func = stat_sched_func_list[p->target_kernel];
#if USE_SPLIT_STRIDE // separate central line update
if(p->stencil.type == SOLAR){
if(p->mpi_rank == 0) fprintf(stderr,"ERROR: solar kernels are not supported for separate central line update\n");
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
exit(1);
}
p->stencil.spt_blk_func = iso_ref_split; // Note keeping the stat. sched. same
if(p->stencil_ctx.thread_group_size == 1){ // use 1WD implementation
p->stencil.mwd_func = swd_iso_ref_split;
} else {
p->stencil.mwd_func = mwd_iso_ref_split;
}
// set central line updates kernels
p->stencil_ctx.clu_func = clu_func_list[p->target_kernel];
#else
p->stencil.spt_blk_func = spt_blk_func_list[p->target_kernel];
// use static openmp schedule if set for spatial blocking time steppers
if( (p->target_ts==0 || p->target_ts==1) && (p->use_omp_stat_sched==1) ){
p->stencil.spt_blk_func = stat_sched_func_list[p->target_kernel];
}
if(p->target_ts != 2) return;
p->stencil.mwd_func = mwd_list[p->mwd_type][p->target_kernel];
  if(p->stencil_ctx.thread_group_size == 1){ // use 1WD implementation
p->stencil.mwd_func = swd_func_list[p->target_kernel];
}
#endif
}
void standard_info_init(Parameters *p){
int i, in_dimension=0;
// count the topology dimensions containing the source point
for(i=0; i<3; i++)
if(((p->source_pt[i]-p->stencil.r) >= p->gb[i]) && ((p->source_pt[i]-p->stencil.r) <= p->ge[i])) in_dimension++;
if(in_dimension == 3){
p->has_source=1;
for(i=0; i<3; i++) p->lsource_pt[i] = p->source_pt[i] - p->gb[i];
} else{
p->has_source=0;
for(i=0; i<3; i++) p->lsource_pt[i] = -1;
}
}
void init(Parameters *p) {
int q, r, i;
set_kernels(p);
p->n_stencils = ((uint64_t) 1)* p->stencil_shape[0] * p->stencil_shape[1] * p->stencil_shape[2];
if(p->stencil.r > 10){
if(p->mpi_rank == 0) fprintf(stderr,"ERROR: Stencil operators with radius greater than 10 are not supported\n");
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
exit(1);
}
if( (p->mpi_size == 1) && (p->halo_concat ==1) ){
p->halo_concat = 0;
// if(p->verbose==1) printf("###INFO: Halo concatenation disabled. It does not make sense in single process run\n");
}
p->source_point_enabled = 0;
// compute the global boundaries of each subdomain
for(i=0; i<3; i++) {
if(p->t.shape[i] > 1) {
q = (int)(p->stencil_shape[i]/p->t.shape[i]);
r = p->stencil_shape[i] % p->t.shape[i];
if(p->t.rank_coords[i] < r) {
p->lstencil_shape[i] = q+1;
p->gb[i] = p->t.rank_coords[i] * (q+1);
}else {
p->lstencil_shape[i] = q;
p->gb[i] = r * (q+1) +
(p->t.rank_coords[i] - r) * q;
}
} else { // i.e. one rank in this dimension
p->lstencil_shape[i] = p->stencil_shape[i];
p->gb[i] = 0;
}
p->ge[i] = p->gb[i] + p->lstencil_shape[i] - 1;
}
if((p->stencil.type == SOLAR) && (p->array_padding == 1)){
if(p->mpi_rank == 0) fprintf(stdout,"WARNING: solar kernels do not support array padding\n");
p->array_padding = 0;
// MPI_Barrier(MPI_COMM_WORLD);
// MPI_Finalize();
// exit(1);
}
int padding_comp, padding_size = 0;
if (p->array_padding == 1) {
padding_comp = (p->lstencil_shape[0]+2*p->stencil.r)%p->alignment;
if (padding_comp != 0) padding_size = p->alignment - padding_comp;
}
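  // Worked example (alignment is used here as an element count): with
  // lstencil_shape[0]=100, r=4, alignment=8: (100+8)%8=4, so padding_size=4
  // extends the leading dimension to 112, a multiple of 8.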
p->ldomain_shape[0] = p->lstencil_shape[0]+2*p->stencil.r + padding_size;
p->ldomain_shape[1] = p->lstencil_shape[1]+2*p->stencil.r;
p->ldomain_shape[2] = p->lstencil_shape[2]+2*p->stencil.r;
// calculate the block size in Y to satisfy the layer condition at the spatially blocked code
p->stencil_ctx.bs_y = p->ldomain_shape[1];
if( (p->cache_size >0) && (p->target_ts !=2) ){
if(p->use_omp_stat_sched==0){
p->stencil_ctx.bs_y = (p->cache_size*1024)/((p->num_threads+2*p->stencil.r)*p->ldomain_shape[0]*sizeof(real_t));
} else {// tailored for the Xeon Phi
if(p->stencil_ctx.thread_group_size ==-1) p->stencil_ctx.thread_group_size = 1;
p->stencil_ctx.bs_y = (p->cache_size*1024)/( (p->num_threads/p->stencil_ctx.thread_group_size)* (p->stencil_ctx.thread_group_size+2*p->stencil.r)*p->ldomain_shape[0]*sizeof(real_t));
}
// set minimum block size if cache is not sufficient
if(p->stencil_ctx.bs_y == 0) p->stencil_ctx.bs_y=p->stencil.r;
}
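  // The block size above encodes a layer condition: roughly
  // (num_threads + 2*r) * ldomain_shape[0] * bs_y * sizeof(real_t) bytes
  // must fit in the assumed usable cache (cache_size is given in KiB).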
// set the local source point and other information
switch(p->target_ts){
case 0: //set source information for standard methods
case 1:
standard_info_init(p);
break;
case 2: // intra-diamond methods
intra_diamond_info_init(p);
break;
}
p->ln_domain = ((uint64_t) 1)* p->ldomain_shape[0] * p->ldomain_shape[1]* p->ldomain_shape[2];
p->ln_stencils = ((uint64_t) 1)* p->lstencil_shape[0] * p->lstencil_shape[1] * p->lstencil_shape[2];
if(p->debug ==1){
MPI_Barrier(MPI_COMM_WORLD);
if(p->mpi_rank == 0) {
printf("\n******************************************************\n"); fflush(stdout);
printf("DEBUG domain decomposition information BEGIN\n"); fflush(stdout);
printf("******************************************************\n"); fflush(stdout); sleep(1);
}
int j,k;
for(j=0; j<p->mpi_size; j++){
if(j == p->mpi_rank){
printf("[%02d]:top(%02d,%02d,%02d)\n", p->mpi_rank, p->t.rank_coords[0], p->t.rank_coords[1], p->t.rank_coords[2]); fflush(stdout);
printf("ln_domain:%06llu lnstencil:%06llu\n", p->ln_domain, p->ln_stencils); fflush(stdout);
printf(" Local stencil Shape:(%03d,%03d,%03d)\n", p->lstencil_shape[0], p->lstencil_shape[1], p->lstencil_shape[2]); fflush(stdout);
printf(" Local domain Shape: (%03d,%03d,%03d)\n", p->ldomain_shape[0], p->ldomain_shape[1], p->ldomain_shape[2]); fflush(stdout);
printf(" Local begin: (%03d,%03d,%03d)\n", p->gb[0], p->gb[1], p->gb[2]); fflush(stdout);
printf(" Local end: (%03d,%03d,%03d)\n", p->ge[0], p->ge[1], p->ge[2]); fflush(stdout);
printf(" Local source point: (%03d,%03d,%03d)\n", p->lsource_pt[0]-p->stencil.r, p->lsource_pt[1]-p->stencil.r, p->lsource_pt[2]-p->stencil.r); fflush(stdout);
printf("\n"); fflush(stdout);
}
MPI_Barrier(MPI_COMM_WORLD);
}
if(p->mpi_rank == 0) {
printf("******************************************************\n"); fflush(stdout);
printf("DEBUG domain decomposition information END\n"); fflush(stdout);
printf("******************************************************\n\n"); fflush(stdout);
}
}
}
void init_coeff(Parameters * p) {
uint64_t i, k, ax;
uint64_t idx, f;
switch(p->stencil.coeff){
case CONSTANT_COEFFICIENT:
for(i=0;i<p->stencil.r+1;i++)
p->coef[i] = p->g_coef[i];
break;
case VARIABLE_COEFFICIENT:
for(k=0; k <= p->stencil.r; k++){
for(i=0; i<p->ln_domain; i++){
p->coef[i + k*p->ln_domain] = p->g_coef[k];
}
}
break;
case VARIABLE_COEFFICIENT_AXSYM:
// central point coeff
for(i=0; i<p->ln_domain; i++){
p->coef[i] = p->g_coef[0];
}
for(k=0; k < p->stencil.r; k++){
for(ax=0; ax<3; ax++){
for(i=0; i<p->ln_domain; i++){
p->coef[i + p->ln_domain + 3*k*p->ln_domain + ax*p->ln_domain] = p->g_coef[k+1];
}
}
}
break;
case VARIABLE_COEFFICIENT_NOSYM:
// central point coeff
for(i=0; i<p->ln_domain; i++){
p->coef[i] = p->g_coef[0];
}
for(k=0; k < p->stencil.r; k++){
for(ax=0; ax<3; ax++){
for(i=0; i<p->ln_domain; i++){
p->coef[i + p->ln_domain + 6*k*p->ln_domain + 2*ax *p->ln_domain] = p->g_coef[k+1];
p->coef[i + p->ln_domain + 6*k*p->ln_domain + (2*ax+1)*p->ln_domain] = p->g_coef[k+1];
}
}
}
break;
case SOLAR_COEFFICIENT:
for(f=0; f<28;f++){
for(idx=0;idx<p->ln_domain*2lu; idx++){
p->coef[idx+f*p->ln_domain*2lu] = p->g_coef[(idx+f*p->ln_domain*2lu)%10];
}
}
break;
default:
printf("ERROR: unknown type of stencil\n");
exit(1);
break;
}
}
void copyv(int *a, int * b, int n) {
int i;
for(i=0;i<n;i++) b[i] = a[i];
}
void copy_halo_struct(Halo a, Halo *b) {
copyv(a.shape, b->shape, 3);
copyv(a.recv_b, b->recv_b, 3);
copyv(a.recv_e, b->recv_e, 3);
copyv(a.send_b, b->send_b, 3);
copyv(a.send_e, b->send_e, 3);
b->recv_hb = a.recv_hb;
b->recv_he = a.recv_he;
b->send_hb = a.send_hb;
b->send_he = a.send_he;
}
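// Field-by-field copy of the parameters: function pointers and the
// communicator handle are copied shallowly, and the large field arrays
// (U1/U2/U3/coef) are deliberately not copied (see the notes below).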
void copy_params_struct(Parameters a, Parameters * b) {
b->alignment =a.alignment;
b->verbose =a.verbose;
b->debug =a.debug;
copyv(a.stencil_shape, b->stencil_shape, 3);
b->n_stencils = a.n_stencils;
b->ln_domain = a.ln_domain;
b->target_ts = a.target_ts;
b->target_kernel = a.target_kernel;
b->source_point_enabled = a.source_point_enabled;
b->mpi_rank = a.mpi_rank;
b->mpi_size = a.mpi_size;
b->n_tests = a.n_tests;
b->nt = a.nt;
b->verify = a.verify;
b->num_threads = a.num_threads;
b->array_padding = a.array_padding;
b->mwd_type = a.mwd_type;
b->use_omp_stat_sched = a.use_omp_stat_sched;
b->stencil_ctx.bs_y = a.stencil_ctx.bs_y;
b->stencil_ctx.bs_x = a.stencil_ctx.bs_x;
b->stencil_ctx.th_x = a.stencil_ctx.th_x;
b->stencil_ctx.th_y = a.stencil_ctx.th_y;
b->stencil_ctx.th_z = a.stencil_ctx.th_z;
b->stencil_ctx.th_c = a.stencil_ctx.th_c;
b->stencil_ctx.thread_group_size = a.stencil_ctx.thread_group_size;
b->stencil_ctx.clu_func = a.stencil_ctx.clu_func;
b->stencil_ctx.num_wf = a.stencil_ctx.num_wf;
b->stencil_ctx.setsize = a.stencil_ctx.setsize;
b->stencil_ctx.bind_masks = a.stencil_ctx.bind_masks;
b->th_block = a.th_block;
b->th_stride = a.th_stride;
b->stencil_ctx.use_manual_cpu_bind = a.stencil_ctx.use_manual_cpu_bind;
b->orig_thread_group_size = a.orig_thread_group_size;
copyv(a.source_pt, b->source_pt, 3);
copyv(a.lstencil_shape, b->lstencil_shape, 3);
copyv(a.ldomain_shape, b->ldomain_shape, 3);
copyv(a.gb, b->gb, 3);
copyv(a.ge, b->ge, 3);
copyv(a.lsource_pt, b->lsource_pt, 3);
b->has_source = a.has_source;
b->halo_concat = a.halo_concat;
b->wavefront = a.wavefront;
b->idiamond_pro_epi_logue_updates = a.idiamond_pro_epi_logue_updates;
b->t_dim = a.t_dim;
b->is_last = a.is_last;
b->in_auto_tuning = a.in_auto_tuning;
b->cache_size = a.cache_size;
// * restrict U1, * restrict U2, * restrict U3, * restrict coef, * restrict source;
//
// copy_halo_struct(a.h[0],&(b->h[0]));
// copy_halo_struct(a.h[1],&(b->h[1]));
// copy_halo_struct(a.h[2],&(b->h[2]));
b->h[0].is_contiguous = a.h[0].is_contiguous;
b->h[1].is_contiguous = a.h[1].is_contiguous;
b->h[2].is_contiguous = a.h[2].is_contiguous;
b->h[0].size = a.h[0].size;
b->h[1].size = a.h[1].size;
b->h[2].size = a.h[2].size;
b->t.right = a.t.right;
b->t.left = a.t.left;
b->t.up = a.t.up;
b->t.down = a.t.down;
b->t.front = a.t.front;
b->t.back = a.t.back;
copyv(a.t.shape, b->t.shape, 3);
copyv(a.t.is_periodic, b->t.is_periodic, 3);
copyv(a.t.rank_coords, b->t.rank_coords, 3);
b->t.cart_comm = a.t.cart_comm;
b->stencil.name = a.stencil.name;
b->stencil.r = a.stencil.r;
b->stencil.nd = a.stencil.nd;
b->stencil.time_order = a.stencil.time_order;
b->stencil.shape = a.stencil.shape;
b->stencil.coeff = a.stencil.coeff;
b->stencil.type = a.stencil.type;
b->stencil.spt_blk_func = a.stencil.spt_blk_func;
b->stencil.stat_sched_func = a.stencil.stat_sched_func;
b->stencil.mwd_func = a.stencil.mwd_func;
}
#define pU1(i,j,k) (p->U1[((k)*(p->ldomain_shape[1])+(j))*(p->ldomain_shape[0])+(i)])
#define pU2(i,j,k) (p->U2[((k)*(p->ldomain_shape[1])+(j))*(p->ldomain_shape[0])+(i)])
#define pU3(i,j,k) (p->U3[((k)*(p->ldomain_shape[1])+(j))*(p->ldomain_shape[0])+(i)])
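// The pU* macros linearize (i,j,k) with i as the unit-stride index:
// offset = ((k)*ny + (j))*nx + (i), using the local domain shape (nx,ny).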
void domain_data_fill_std(Parameters * p){
uint64_t i,j,k, gi, gj, gk;
real_t r;
int xb, xe, yb, ye, zb, ze;
#pragma omp parallel for
for(i=0; i<p->ln_domain;i++){
p->U1[i] = 0.0;
p->U2[i] = 0.0;
if(p->stencil.time_order == 2)
p->U3[i] = 0.0;
}
xb = 0;
yb = 0;
zb = 0;
xe = p->lstencil_shape[0]+2*p->stencil.r;
ye = p->lstencil_shape[1]+2*p->stencil.r;
ze = p->lstencil_shape[2]+2*p->stencil.r;
if(p->t.rank_coords[0] == 0) xb += p->stencil.r;
if(p->t.rank_coords[1] == 0) yb += p->stencil.r;
if(p->t.rank_coords[2] == 0) zb += p->stencil.r;
if(p->t.rank_coords[0] == p->t.shape[0]-1) xe -= p->stencil.r;
if(p->t.rank_coords[1] == p->t.shape[1]-1) ye -= p->stencil.r;
if(p->t.rank_coords[2] == p->t.shape[2]-1) ze -= p->stencil.r;
// fill the local stencil subdomain according to the global location and pad the boundary with zeroes
#pragma omp parallel for private(j,i,gi,gj,gk,r)
for(k=zb; k<ze; k++){
for(j=yb; j<ye; j++){
for(i=xb; i<xe; i++){
gi = i + p->gb[0];
gj = j + p->gb[1];
gk = k + p->gb[2];
r = 1.0/3 * (1.0*gi/p->stencil_shape[0] + 1.0*gj/p->stencil_shape[1] + 1.0*gk/p->stencil_shape[2]);
pU1(i, j, k) = r*1.845703;
pU2(i, j, k) = r*1.845703;
if(p->stencil.time_order == 2)
pU3(i, j, k) = r*1.845703;
}
}
}
// fill the extra allocation of diamond tiles
int gb_1;
if( (p->target_ts == 2) && (p->stencil.time_order == 2) && p->mpi_size > 1){
// last process extends to the beginning of the domain
if(p->t.rank_coords[1] == p->t.shape[1]-1) {
yb = ye + 2*p->stencil.r;
ye = yb + (p->t_dim+1+1)*p->stencil.r;
gb_1 = -yb+p->stencil.r;
} else {
yb = ye;
ye += (p->t_dim+1)*p->stencil.r;
gb_1 = p->gb[1];
}
for(k=zb; k<ze; k++){
for(j=yb; j<ye; j++){
for(i=xb; i<xe; i++){
gi = i + p->gb[0];
gj = j + gb_1;
gk = k + p->gb[2];
r = 1.0/3 * (1.0*gi/p->stencil_shape[0] + 1.0*gj/p->stencil_shape[1] + 1.0*gk/p->stencil_shape[2]);
pU1(i, j, k) = r*1.845703;
pU2(i, j, k) = r*1.845703;
if(p->stencil.time_order == 2)
pU3(i, j, k) = r*1.845703;
}
}
}
}
// set source points at the first and last YZ plains
if(p->t.rank_coords[0] == 0){
#pragma omp parallel for private(j)
for(k=0; k<p->ldomain_shape[2]; k++){
for(j=0; j<p->ldomain_shape[1]; j++){
pU1(0, j, k) += BOUNDARY_SRC_VAL;
pU2(0, j, k) += BOUNDARY_SRC_VAL;
}
}
}
if(p->t.rank_coords[0] == p->t.shape[0]-1){
#pragma omp parallel for private(j)
for(k=0; k<p->ldomain_shape[2]; k++){
for(j=0; j<p->ldomain_shape[1]; j++){
pU1(p->lstencil_shape[0]+2*p->stencil.r-1, j, k) += BOUNDARY_SRC_VAL;
pU2(p->lstencil_shape[0]+2*p->stencil.r-1, j, k) += BOUNDARY_SRC_VAL;
}
}
}
}
void domain_data_fill_solar(Parameters *p){
uint64_t f, i,j,k, gi, gj, gk;
real_t r;
int xb, xe, yb, ye, zb, ze;
for(i=0; i<p->ln_domain*24lu;i++){
p->U1[i] = 0.0;
}
xb = 0;
yb = 0;
zb = 0;
xe = p->lstencil_shape[0]+2*p->stencil.r;
ye = p->lstencil_shape[1]+2*p->stencil.r;
ze = p->lstencil_shape[2]+2*p->stencil.r;
/* if(p->t.rank_coords[0] == 0) xb += 2*p->stencil.r;
if(p->t.rank_coords[1] == 0) yb += 2*p->stencil.r;
if(p->t.rank_coords[2] == 0) zb += 2*p->stencil.r;
if(p->t.rank_coords[0] == p->t.shape[0]-1) xe -= 2*p->stencil.r;
if(p->t.rank_coords[1] == p->t.shape[1]-1) ye -= 2*p->stencil.r;
if(p->t.rank_coords[2] == p->t.shape[2]-1) ze -= 2*p->stencil.r;
*/ // fill the local stencil subdomain according to the global location and pad the boundary with zeroes
for(f=0; f<12; f++){
for(k=zb; k<ze; k++){
for(j=yb; j<ye; j++){
for(i=xb; i<xe; i++){
gi = i + p->gb[0];
gj = j + p->gb[1];
gk = k + p->gb[2];
r = 1.0/(3.0) * (1.0*gi/p->stencil_shape[0] + 1.0*gj/p->stencil_shape[1] + 1.0*gk/p->stencil_shape[2]);
p->U1[2*((k*p->ldomain_shape[1]+j)*p->ldomain_shape[0] + i +p->ln_domain*f)] = r*1.845703;
p->U1[2*((k*p->ldomain_shape[1]+j)*p->ldomain_shape[0] + i +p->ln_domain*f)+1] = r*1.845703/3.0;
}
}
}
}
/* // fill the extra allocation of diamond tiles
int gb_1;
if( (p->target_ts == 2) && (p->stencil.time_order == 2) && p->mpi_size > 1){
// last process extends to the beginning of the domain
if(p->t.rank_coords[1] == p->t.shape[1]-1) {
yb = ye + 2*p->stencil.r;
ye = yb + (p->t_dim+1+1)*p->stencil.r;
gb_1 = -yb+p->stencil.r;
} else {
yb = ye;
ye += (p->t_dim+1)*p->stencil.r;
gb_1 = p->gb[1];
}
for(k=zb; k<ze; k++){
for(j=yb; j<ye; j++){
for(i=xb; i<xe; i++){
gi = i + p->gb[0];
gj = j + gb_1;
gk = k + p->gb[2];
r = 1.0/3 * (1.0*gi/p->stencil_shape[0] + 1.0*gj/p->stencil_shape[1] + 1.0*gk/p->stencil_shape[2]);
pU1(i, j, k) = r*1.845703;
pU2(i, j, k) = r*1.845703;
if(p->stencil.time_order == 2)
pU3(i, j, k) = r*1.845703;
}
}
}
}
*/
/* // set source points at the first and last YZ plains
for(f=0; f<12;f++){
if(p->t.rank_coords[0] == 0){
for(k=0; k<p->ldomain_shape[2]; k++){
for(j=0; j<p->ldomain_shape[1]; j++){
for(i=0;i<2;i++){
p->U1[2*((k*p->ldomain_shape[1]+j)*p->ldomain_shape[0] + p->ln_domain*f + i)] += BOUNDARY_SRC_VAL;
p->U1[2*((k*p->ldomain_shape[1]+j)*p->ldomain_shape[0] + p->ln_domain*f + i)+1] += BOUNDARY_SRC_VAL;
}
}
}
}
if(p->t.rank_coords[0] == p->t.shape[0]-1){
for(k=0; k<p->ldomain_shape[2]; k++){
for(j=0; j<p->ldomain_shape[1]; j++){
for(i=0;i<2;i++){
p->U1[2*(p->lstencil_shape[0]+2*p->stencil.r-1 + (k*p->ldomain_shape[1]+j)*p->ldomain_shape[0] + p->ln_domain*f -i)] += BOUNDARY_SRC_VAL;
p->U1[2*(p->lstencil_shape[0]+2*p->stencil.r-1 + (k*p->ldomain_shape[1]+j)*p->ldomain_shape[0] + p->ln_domain*f -i) + 1] += BOUNDARY_SRC_VAL;
}
}
}
}
}*/
}
void domain_data_fill(Parameters *p){
switch(p->stencil.type){
case REGULAR:
domain_data_fill_std(p);
break;
case SOLAR:
domain_data_fill_solar(p);
break;
default:
printf("ERROR: unknown type of stencil\n");
exit(1);
break;
}
}
void performance_results(Parameters *p, double t, double t_max, double t_min, double t_med, double t_ts_main_max, double t_ts_main_min){
double max_comm, max_comp, max_wait, max_others, max_total;
double min_comm, min_comp, min_wait, min_others, min_total;
double mean_comm, mean_comp, mean_wait, mean_others, mean_total;
int i;
uint64_t total_stencils;
int num_thread_groups = get_ntg(*p);
// look for NANs and zero results
uint64_t k, zeroes_p, n_zeroes=0, nans=0;
for (k=0; k<p->ln_domain; k++){
    // x*0 != 0 holds exactly when x is NaN or +/-inf
    nans += (p->U1[k] * 0) != 0;
    // check for (near-)zeroes; SP and DP currently use the same threshold
    if(fabs(p->U1[k]) < 1e-6) n_zeroes++;
}
zeroes_p = 100*n_zeroes/p->ln_domain;
if(zeroes_p > 90){
printf("\n******************************************************\n");
printf("##WARNING[rank:%d]: %llu%% of the sub domain contains zeroes. This might result in inaccurate performance results\n", p->mpi_rank, zeroes_p);
printf("******************************************************\n\n");
}
if(nans > 0){
printf("\n******************************************************\n");
printf("##WARNING[rank:%d]: %llu nan and/or -inf/inf values in the final sub domain solution. This might result in inaccurate performance results\n", p->mpi_rank, nans);
printf("******************************************************\n\n");
}
// Collect timing statistics
ierr = MPI_Reduce(&(p->prof.compute), &max_comp, 1, MPI_DOUBLE, MPI_MAX, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.communicate), &max_comm, 1, MPI_DOUBLE, MPI_MAX, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.wait), &max_wait, 1, MPI_DOUBLE, MPI_MAX, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.others), &max_others, 1, MPI_DOUBLE, MPI_MAX, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.total), &max_total, 1, MPI_DOUBLE, MPI_MAX, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.compute), &min_comp, 1, MPI_DOUBLE, MPI_MIN, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.communicate), &min_comm, 1, MPI_DOUBLE, MPI_MIN, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.wait), &min_wait, 1, MPI_DOUBLE, MPI_MIN, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.others), &min_others, 1, MPI_DOUBLE, MPI_MIN, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.total), &min_total, 1, MPI_DOUBLE, MPI_MIN, 0, p->t.cart_comm); CHKERR(ierr);
ierr = MPI_Reduce(&(p->prof.compute), &mean_comp, 1, MPI_DOUBLE, MPI_SUM, 0, p->t.cart_comm); CHKERR(ierr); mean_comp/=p->mpi_size;
ierr = MPI_Reduce(&(p->prof.communicate), &mean_comm, 1, MPI_DOUBLE, MPI_SUM, 0, p->t.cart_comm); CHKERR(ierr); mean_comm/=p->mpi_size;
ierr = MPI_Reduce(&(p->prof.wait), &mean_wait, 1, MPI_DOUBLE, MPI_SUM, 0, p->t.cart_comm); CHKERR(ierr); mean_wait/=p->mpi_size;
ierr = MPI_Reduce(&(p->prof.others), &mean_others, 1, MPI_DOUBLE, MPI_SUM, 0, p->t.cart_comm); CHKERR(ierr); mean_others/=p->mpi_size;
ierr = MPI_Reduce(&(p->prof.total), &mean_total, 1, MPI_DOUBLE, MPI_SUM, 0, p->t.cart_comm); CHKERR(ierr); mean_total/=p->mpi_size;
////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////
// print the performance results
if (p->mpi_rank == 0) {
printf("Total memory allocation per MPI rank: %llu MiB\n", sizeof(real_t)*p->ln_domain*3/1024/1024);
printf("Total time(s): %e\n", t*p->n_tests);
printf("time/test(s): %e\n", t);
if(p->target_ts != 2){
printf("\nRANK0 GStencil/s MEDIAN: %f \n", p->ln_stencils/(1e9*t_med)*p->nt);
printf("RANK0 GStencil/s MIN: %f \n", p->ln_stencils/(1e9*t_max));
printf("RANK0 GStencil/s AVG: %f \n", p->ln_stencils/(1e9*(t/p->nt) ));
printf("RANK0 GStencil/s MAX: %f \n", p->ln_stencils/(1e9*t_min));
printf("\n******************************************************\n");
printf("RANK0 Total: %f (s) -%06.2f%%\n", p->prof.total,p->prof.total/p->prof.total*100);
printf("RANK0 Computation: %f (s) - %05.2f%%\n", p->prof.compute, p->prof.compute/p->prof.total*100);
printf("RANK0 Communication: %f (s) - %05.2f%%\n", p->prof.communicate,p->prof.communicate/p->prof.total*100);
printf("RANK0 Waiting: %f (s) - %05.2f%%\n", p->prof.wait,p->prof.wait/p->prof.total*100);
printf("RANK0 Other: %f (s) - %05.2f%%\n", p->prof.others,p->prof.others/p->prof.total*100);
printf("\n******************************************************\n");
printf("MEAN Total: %f (s) -%06.2f%%\n", mean_total,mean_total/mean_total*100);
printf("MEAN Computation: %f (s) - %05.2f%%\n", mean_comp, mean_comp/mean_total*100);
printf("MEAN Communication: %f (s) - %05.2f%%\n", mean_comm,mean_comm/mean_total*100);
printf("MEAN Waiting: %f (s) - %05.2f%%\n", mean_wait,mean_wait/mean_total*100);
printf("MEAN Other: %f (s) - %05.2f%%\n", mean_others,mean_others/mean_total*100);
printf("\n******************************************************\n");
printf("MAX Total: %f (s)\n", max_total);
printf("MAX Computation: %f (s)\n", max_comp);
printf("MAX Communication: %f (s)\n", max_comm);
printf("MAX Waiting: %f (s)\n", max_wait);
printf("MAX Other: %f (s)\n", max_others);
printf("\n******************************************************\n");
printf("MIN Total: %f (s)\n", min_total);
printf("MIN Computation: %f (s)\n", min_comp);
printf("MIN Communication: %f (s)\n", min_comm);
printf("MIN Waiting: %f (s)\n", min_wait);
printf("MIN Other: %f (s)\n", min_others);
}
if( (p->wavefront !=0) && (p->target_ts == 2) ) {
printf("\nTotal RANK0 MStencil/s MIN: %f \n", p->ln_stencils/(1e6*t_max));
printf("Total RANK0 MStencil/s MAX: %f \n", p->ln_stencils/(1e6*t_min));
printf("******************************************************\n");
total_stencils = ((uint64_t) p->ln_stencils * (uint64_t) p->nt - p->idiamond_pro_epi_logue_updates)/(1e6);
printf("MWD main-loop RANK0 MStencil/s MIN: %f\n", total_stencils*1.0/(t_ts_main_max));
printf("MWD main-loop RANK0 MStencil/s MAX: %f\n", total_stencils*1.0/(t_ts_main_min));
printf("******************************************************\n");
printf("%-27s %f (s) - %05.2f%%\n", "RANK0 ts main loop:",
p->prof.ts_main,p->prof.ts_main/p->prof.total*100);
printf("%-27s %f (s) - %05.2f%%\n", "RANK0 ts prologue/epilogue:",
p->prof.ts_others, p->prof.ts_others/p->prof.total*100);
printf("%-27s %f (s) - %05.2f%%\n","RANK0 ts others:",
p->prof.total-(p->prof.ts_main+p->prof.ts_others), (p->prof.total-(p->prof.ts_main+p->prof.ts_others))/p->prof.total*100);
printf("******************************************************\n");
printf("%-30s", "Metric \\ core:");
for(i=0; i<p->num_threads; i++) printf(" core %02d ", i);
printf("\n");
printf("%-27s", "Wavefront synchronization [s]:");
for(i=0; i<p->num_threads; i++) printf(" %e", p->stencil_ctx.t_wait[i]);
printf("\n");
printf("%-27s", "Wavefront synchronization [%]:");
for(i=0; i<p->num_threads; i++) printf(" %05.2f ", p->stencil_ctx.t_wait[i]/(p->prof.ts_main + p->prof.ts_others)*100);
printf("\n\n");
printf("%-27s", "Metric \\ thread group:");
for(i=0; i<num_thread_groups; i++) printf(" group %02d ", i);
printf("\n");
printf("%-27s", "Wavefront steady state [s]:");
for(i=0; i<num_thread_groups; i++) printf(" %e", p->stencil_ctx.t_wf_main[i]);
printf("\n");
printf("%-27s", "Wavefront steady state [%]:");
for(i=0; i<num_thread_groups; i++) printf(" %05.2f ", p->stencil_ctx.t_wf_main[i]/(p->prof.ts_main+p->prof.ts_others)*100);
printf("\n");
printf("%-27s", "Wavefront startup/end [s]:");
for(i=0; i<num_thread_groups; i++) printf(" %e", p->stencil_ctx.t_wf_prologue[i] + p->stencil_ctx.t_wf_epilogue[i]);
printf("\n");
printf("%-27s", "Wavefront startup/end [%]:");
for(i=0; i<num_thread_groups; i++) printf(" %05.2f ",
(p->stencil_ctx.t_wf_prologue[i] + p->stencil_ctx.t_wf_epilogue[i])/(p->prof.ts_main+p->prof.ts_others)*100);
printf("\n");
printf("%-27s", "Wavefront communication [s]:");
for(i=0; i<num_thread_groups; i++) printf(" %e", p->stencil_ctx.t_wf_comm[i]);
printf("\n");
printf("%-27s", "Wavefront communication [%]:");
for(i=0; i<num_thread_groups; i++) printf(" %05.2f ", p->stencil_ctx.t_wf_comm[i]/(p->prof.ts_main+p->prof.ts_others)*100);
printf("\n");
printf("%-27s", "Wavefront others [s]:");
for(i=0; i<num_thread_groups; i++) printf(" %e",
p->prof.ts_main+p->prof.ts_others - (p->stencil_ctx.t_wf_main[i] + p->stencil_ctx.t_wf_prologue[i] + p->stencil_ctx.t_wf_comm[i] + p->stencil_ctx.t_wf_epilogue[i]));
printf("\n");
printf("%-27s", "Wavefront others [%]:");
for(i=0; i<num_thread_groups; i++) printf(" %05.2f ",
(p->prof.ts_main+ p->prof.ts_others - (p->stencil_ctx.t_wf_main[i] + p->stencil_ctx.t_wf_prologue[i] + p->stencil_ctx.t_wf_comm[i] + p->stencil_ctx.t_wf_epilogue[i]))/(p->prof.ts_main+ p->prof.ts_others)*100);
printf("\n");
printf("%-27s", "Group spin-wait [s]:");
for(i=0; i<num_thread_groups; i++) printf(" %e", p->stencil_ctx.t_group_wait[i]);
printf("\n");
printf("%-27s", "Group spin-wait [%]:");
for(i=0; i<num_thread_groups; i++) printf(" %05.2f ", p->stencil_ctx.t_group_wait[i]/(p->prof.total)*100);
printf("\n");
printf("%-27s", "Resolved diamonds:");
for(i=0; i<num_thread_groups; i++) printf(" %e", p->stencil_ctx.wf_num_resolved_diamonds[i]);
}
printf("\n******************************************************\n");
}
}
void print_param(Parameters p) {
char *coeff_type, *precision;
int wf_halo = p.stencil.r;
int diam_height;
diam_height = (p.t_dim*2)*p.stencil.r + p.stencil_ctx.num_wf;
switch (p.stencil.coeff){
case CONSTANT_COEFFICIENT:
coeff_type = "constant";
break;
case VARIABLE_COEFFICIENT:
coeff_type = "variable";
break;
case VARIABLE_COEFFICIENT_AXSYM:
coeff_type = "variable axis-symmetric";
break;
case VARIABLE_COEFFICIENT_NOSYM:
coeff_type = "variable no-symmetry";
break;
case SOLAR_COEFFICIENT:
coeff_type = "Solar kernel";
break;
}
precision = ((sizeof(real_t)==4)?"SP":"DP");
printf("\n******************************************************\n");
printf("Parameters settings\n");
printf("******************************************************\n");
printf("Time stepper name: %s\n", TSList[p.target_ts].name);
printf("Stencil Kernel name: %s\n", p.stencil.name);
printf("Stencil Kernel semi-bandwidth: %d\n", p.stencil.r);
printf("Stencil Kernel coefficients: %s\n", coeff_type);
printf("Precision: %s\n", precision);
printf("Global domain size:%llu nx:%d ny:%d nz:%d\n", p.n_stencils, p.stencil_shape[0],p.stencil_shape[1],p.stencil_shape[2]);
printf("Rank 0 domain size:%llu nx:%d ny:%d nz:%d\n", p.ln_stencils, p.lstencil_shape[0],p.lstencil_shape[1],p.lstencil_shape[2]);
printf("Number of time steps: %d\n", p.nt);
printf("Alignment size: %d Bytes\n", p.alignment);
printf("Number of tests: %d\n", p.n_tests);
printf("Verify: %d\n", p.verify);
printf("Source point enabled: %d\n", p.source_point_enabled);
printf("Time unroll: %d\n", p.t_dim);
printf("Using separate call to central line update: %d\n", USE_SPLIT_STRIDE);
printf("Halo concatenation: %d\n", p.halo_concat);
// Print kernel specific parameters
switch(p.target_ts){
case 0:
case 1:
if(p.h[2].is_contiguous ==1) printf("MPI datatype is contiguous across the Z direction\n");
printf("Block size in Y: %d\n", p.stencil_ctx.bs_y);
printf("OpenMP schedule: ");
    if(p.use_omp_stat_sched==1) printf("static\n"); else printf("static,1\n");
break;
case 2: // dynamic scheduling intra diamond methods
printf("Enable wavefronts: %d\n", p.wavefront!=0);
// if(p.stencil_ctx.thread_group_size!=1)
printf("Wavefront parallel strategy: %s\n", MWD_name[p.mwd_type]);
printf("Intra-diamond width: %d\n", (p.t_dim+1)*2*p.stencil.r);
printf("Wavefront width: %d\n", diam_height);
printf("Cache block size/wf (kiB): %llu\n", p.wf_blk_size/1024);
printf("Total cache block size (kiB): %llu\n", (p.num_threads/p.stencil_ctx.thread_group_size) * p.wf_blk_size/1024);
printf("Next larger cache block size/wf (kiB): %llu (diam_width=%d)\n", p.wf_larger_blk_size/1024, (p.larger_t_dim+1)*2*p.stencil.r);
printf("Intra-diamond prologue/epilogue MStencils: %llu\n", p.idiamond_pro_epi_logue_updates/(1000*1000));
printf("Multi-wavefront updates: %d\n", p.stencil_ctx.num_wf);
printf("User set thread group size: %d\n", p.orig_thread_group_size);
printf("Thread group size: %d\n", p.stencil_ctx.thread_group_size);
printf("Threads along z-axis: %d\n", p.stencil_ctx.th_z);
printf("Threads along y-axis: %d\n", p.stencil_ctx.th_y);
printf("Threads along x-axis: %d\n", p.stencil_ctx.th_x);
printf("Threads per cell : %d\n", p.stencil_ctx.th_c);
printf("Threads block: %d\n", p.th_block);
printf("Threads stride: %d\n", p.th_stride);
break;
}
printf("OpenMP Threads: %d\n", p.num_threads);
printf("Assumed usable cache size: %dKiB\n", p.cache_size);
printf("MPI size: %d\n", p.mpi_size);
printf("Processors topology (npx, npy, npz): %02d,%02d,%02d\n", p.t.shape[0], p.t.shape[1], p.t.shape[2]);
printf("******************************************************\n");
}
void list_kernels(Parameters *p){
int i;
char *coeff_type;
if(p->mpi_rank==0) {
printf("Available time steppers:\n# Name\n");
i = 0;
while(1){
if (TSList[i].name == 0) break;
printf("%02d %s\n",i, TSList[i].name);
i++;
}
printf("\nAvailable stencil kernels:\n");
i = 0;
while(1){
switch (stencil_info_list[i].coeff){
case CONSTANT_COEFFICIENT:
coeff_type = "constant";
break;
case VARIABLE_COEFFICIENT:
coeff_type = "variable";
break;
case VARIABLE_COEFFICIENT_AXSYM:
coeff_type = "variable axis-symmetric";
break;
case VARIABLE_COEFFICIENT_NOSYM:
coeff_type = "variable no-symmetry";
break;
case SOLAR_COEFFICIENT:
coeff_type = "Solar kernel";
break;
}
if (stencil_info_list[i].name == 0) break;
printf("%02d stencil_op:%s time-order:%d radius:%d coeff:%s\n",
i, stencil_info_list[i].name, stencil_info_list[i].time_order, stencil_info_list[i].r,coeff_type);
i++;
}
printf("\nAvailable MWD implementations:\n# Name\n");
i = 0;
while(1){
if (MWD_name[i] == 0) break;
printf("%02d %s\n",i, MWD_name[i]);
i++;
}
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
exit(0);
}
void print_help(Parameters *p){
if(p->mpi_rank == 0){
printf(
"Note: default values are available at 'param_default()' in src/utils.c\n"
"Usage:\n\n"
" --help\n"
" Show available options\n"
" --list\n"
" List the available time stepper kernels and their number.\n"
" Description of the time steppers is available at wrappers.h\n"
" --verify <bool>\n"
" Verify the correctness of the selected time stepper using\n"
" the provided domain and topology information\n"
" (This option disables performance measurement experiments)\n"
"\nGeneral experiment parameters:\n"
" --target-ts <integer>\n"
" Select the desired time stepper kernel\n"
" (can acquire the available time stepper through --list option)\n"
// " --stencil_radius <integer>\n"
// " Select the radius of the stencil operator (maximum is 10)\n"
// " --const-coeff-stencil <bool> (default 1)"
// " Determine whether the stencil coefficients are constant (1) or variable (2)\n"
" --target-kernel <integer>\n"
" Select the desired stencil kernel\n"
" (can acquire the available kernels through --list option)\n"
" --nx <integer>\n"
" Global domain size in the x direction\n"
" --ny <integer>\n"
" Global domain size in the y direction\n"
" --nz <integer>\n"
" Global domain size in the z direction\n"
" --nt <integer>\n"
" The desired number of time steps\n"
" --npx <integer>\n"
" Number of MPI processes across the x direction\n"
" --npy <integer>\n"
" Number of MPI processes across the y direction\n"
" --npz <integer>\n"
" Number of MPI processes across the z direction\n"
" --n-tests <integer>\n"
" Specify how many times the time stepper is run at the\n"
" performance measurement experiments\n"
" --alignment <integer>\n"
" Specify the memory alignment value (in bytes) of the\n"
" allocated domain arrays\n"
// " --disable-source-point\n"
// " disables the source point update in the solution domain\n"
"\nDisplay options:\n"
" --verbose <bool>\n"
" Show the used configuration information\n"
" --debug <bool>\n"
" Print detailed debugging information\n"
"\nSpecialized arguments:\n"
" --t-dim <integer> (specific to the Diamond methods)\n"
" Specifies the value of unrolling in time\n"
" --z-mpi-contig <bool> (Specific to standard methods: 0-1)\n"
" Uses contiguous MPI datatype in the z direction of the MPI\n"
" topology at the standard methods\n"
" --halo-concatenate <integer> (experimental feature)\n"
" Explicitly concatenate halo information before send and unpack them\n"
" at receiving end using multi-threading.\n"
" (Works for decomposition across X and Y only)\n"
" --thread-group-size <integer> (specific to diamond tiling)\n"
" Set the thread group size for methods supporting multiple thread groups\n"
" --thx <integer> (specific to diamond tiling)\n"
" Set threads number along the x-axis\n"
" --thy <integer> (specific to diamond tiling)\n"
" Set threads number along the y-axis in the diamond tile\n"
" --thz <integer> (specific to diamond tiling)\n"
" Set threads number along the z-axis\n"
" --thc <integer> (specific to diamond tiling solar kernels)\n"
" Set threads number per component\n"
" --cache-size <integer>\n"
" The usable last level cache size for spatial blocking\n"
" --wavefront <bool>\n"
" Indicate whether to use wavefront in the tile. 1 for yes and 0 for no (default 1)\n"
" --num-wavefronts <int>\n"
" Set the number of wavefronts updated per wavefront iteration (default 1)\n"
" --mwd-type <int>\n"
" Select one of the MWD implementations from the one available at --list option\n"
" --use-omp-stat-sched\n"
" Use OpenMP static schedule instead of static,1 at the spatial blocking time steppers\n"
" --threads threads[:block[:stride]] (specific to diamond tiling)\n"
" Set the threads number, blocks, and strides when using internal affinitiy control\n"
// " --target-parallel-wavefront <integer>\n"
// " Indicate specify multi-threaded wavefront parallelization strategy (specific to ts 9)\n"
);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
exit(0);
}
void parse_args (int argc, char** argv, Parameters * p)
{ // for more details see http://libslack.org/manpages/getopt.3.html
int c;
char *threads=NULL;
int cache_size=-1;
while (1)
{
int option_index = 0;
static struct option long_options[] =
{
{"nz", 1, 0, 0},
{"ny", 1, 0, 0},
{"nx", 1, 0, 0},
{"nt", 1, 0, 0},
{"alignment", 1, 0, 0},
{"verbose", 1, 0, 0},
{"debug", 1, 0, 0},
{"target-ts", 1, 0, 0},
{"target-kernel", 1, 0, 0},
{"n-tests", 1, 0, 0},
{"verify", 1, 0, 0},
{"list", 0, 0, 0},
{"help", 0, 0, 0},
{"npx", 1, 0, 0},
{"npy", 1, 0, 0},
{"npz", 1, 0, 0},
{"t-dim", 1, 0, 0},
{"z-mpi-contig", 1, 0, 0},
{"disable-source-point", 0, 0, 0},
{"halo-concatenate", 1, 0, 0},
{"thread-group-size", 1, 0, 0},
{"cache-size", 1, 0, 0},
{"wavefront", 1, 0, 0},
{"num-wavefronts", 1, 0, 0},
{"pad-array", 0, 0, 0},
{"mwd-type", 1, 0, 0},
{"thx", 1, 0, 0},
{"thy", 1, 0, 0},
{"thz", 1, 0, 0},
{"thc", 1, 0, 0},
{"threads", 1, 0, 0},
{"use-omp-stat-sched", 0, 0, 0},
// {"target-parallel-wavefront", 1, 0, 0},
{0, 0, 0, 0}
};
c = getopt_long (argc, argv, "",
long_options, &option_index);
if (c == -1)
break;
switch (c)
{
case 0:
if(strcmp(long_options[option_index].name, "nz") == 0) p->stencil_shape[2] = atoi(optarg);
else if(strcmp(long_options[option_index].name, "ny") == 0) p->stencil_shape[1] = atoi(optarg);
else if(strcmp(long_options[option_index].name, "nx") == 0) p->stencil_shape[0] = atoi(optarg);
else if(strcmp(long_options[option_index].name, "nt") == 0) p->nt = atoi(optarg);
else if(strcmp(long_options[option_index].name, "npx") == 0) p->t.shape[0] = atoi(optarg);
else if(strcmp(long_options[option_index].name, "npy") == 0) p->t.shape[1] = atoi(optarg);
else if(strcmp(long_options[option_index].name, "npz") == 0) p->t.shape[2] = atoi(optarg);
else if(strcmp(long_options[option_index].name, "alignment") == 0) p->alignment = atoi(optarg);
else if(strcmp(long_options[option_index].name, "verbose") == 0) p->verbose = atoi(optarg)!=0;
else if(strcmp(long_options[option_index].name, "target-ts") == 0) p->target_ts = atoi(optarg);
else if(strcmp(long_options[option_index].name, "target-kernel") == 0) p->target_kernel = atoi(optarg);
else if(strcmp(long_options[option_index].name, "n-tests") == 0) p->n_tests = atoi(optarg);
else if(strcmp(long_options[option_index].name, "verify") == 0) p->verify = atoi(optarg)!=0;
else if(strcmp(long_options[option_index].name, "debug") == 0) p->debug = atoi(optarg)!=0;
else if(strcmp(long_options[option_index].name, "t-dim") == 0) p->t_dim = atoi(optarg);
else if(strcmp(long_options[option_index].name, "z-mpi-contig") == 0) p->h[2].is_contiguous = atoi(optarg)!=0;
else if(strcmp(long_options[option_index].name, "list") == 0) list_kernels(p);
else if(strcmp(long_options[option_index].name, "help") == 0) print_help(p);
else if(strcmp(long_options[option_index].name, "disable-source-point") == 0) p->source_point_enabled=0;
else if(strcmp(long_options[option_index].name, "halo-concatenate") == 0) p->halo_concat = atoi(optarg)!=0;
else if(strcmp(long_options[option_index].name, "thread-group-size") == 0) p->stencil_ctx.thread_group_size = atoi(optarg);
else if(strcmp(long_options[option_index].name, "cache-size") == 0) cache_size = atoi(optarg);
else if(strcmp(long_options[option_index].name, "wavefront") == 0) p->wavefront = atoi(optarg)!=0;
else if(strcmp(long_options[option_index].name, "num-wavefronts") == 0) p->stencil_ctx.num_wf = atoi(optarg);
else if(strcmp(long_options[option_index].name, "pad-array") == 0) p->array_padding = 1;
else if(strcmp(long_options[option_index].name, "mwd-type") == 0) p->mwd_type = atoi(optarg);
else if(strcmp(long_options[option_index].name, "use-omp-stat-sched") == 0) p->use_omp_stat_sched = 1;
else if(strcmp(long_options[option_index].name, "thx") == 0) p->stencil_ctx.th_x = atoi(optarg);
else if(strcmp(long_options[option_index].name, "thy") == 0) p->stencil_ctx.th_y = atoi(optarg);
else if(strcmp(long_options[option_index].name, "thz") == 0) p->stencil_ctx.th_z = atoi(optarg);
else if(strcmp(long_options[option_index].name, "thc") == 0) p->stencil_ctx.th_c = atoi(optarg);
else if(strcmp(long_options[option_index].name, "threads") == 0) threads = strtok(optarg,":");
// else if(strcmp(long_options[option_index].name, "target-parallel-wavefront") == 0) p->target_parallel_wavefront = atoi(optarg);
break;
default:
if(p->mpi_rank == 0){
fprintf(stderr, "Invalid arguments\n\n");
}
print_help(p);
break;
}
}
if (optind < argc)
{
if(p->mpi_rank == 0){
fprintf(stderr, "Invalid arguments\n\n");
}
print_help(p);
}
set_centered_source(p);
// set assumed cache size to zero for diamond approach if not set by the user
if(cache_size !=-1) p->cache_size = cache_size;
if( (cache_size ==-1) && (p->target_ts == 2) ) p->cache_size = 0;
// allow thread group size change only for methods supporting multiple thread groups
int num_threads=0;
if(p->target_ts == 2) {
// parse the thread affinity argument
if(threads != NULL){
num_threads = atoi(threads); //parse threads
if(num_threads > 0){ // a non-positive value lets the user disable manual thread binding
p->num_threads = num_threads;
p->stencil_ctx.use_manual_cpu_bind = 1;
p->th_stride = 1;
p->th_block = 1;
threads = strtok (NULL, ":");
if(threads != NULL){ //parse block
p->th_block = atoi(threads);
threads = strtok (NULL, ":");
}
if(threads != NULL){ //parse stride
p->th_stride = atoi(threads);
}
}
}
}
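/* Illustrative example (not from the original sources): with the parsing
above, "--threads 10:2:5" uses the format num[:block[:stride]] and appears
to request 10 manually-bound threads placed in blocks of 2 with a stride
of 5; a non-positive thread count leaves manual CPU binding disabled. */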
p->orig_thread_group_size = p->stencil_ctx.thread_group_size;
// p->use_omp_stat_sched = 0;
}
void print_3Darray(char *filename, real_t * restrict array, int nx, int ny, int nz, int halo) {
int i,j,k;
FILE *fp;
if(!(fp = fopen(filename, "w"))) {
printf("ERROR: cannot open file for writing\n");
return;
}
for (i=halo; i<nz-halo; i++) {
// new page
fprintf(fp, "\n***************** slice # %d *************\n",i+1-halo);
for (j=halo; j<ny-halo; j++) {
for (k=halo; k<nx-halo; k++) {
fprintf(fp, "%+.4e ", array[(i*ny+j)*nx+k]);
}
// new line
fprintf(fp, "\n");
}
}
fclose(fp);
}
void print_3Darray_solar(char *filename, real_t * restrict array, int nx, int ny, int nz, int halo) {
int f,i,j,k;
uint64_t idx;
FILE *fp;
if(!(fp = fopen(filename, "w"))) {
printf("ERROR: cannot open file for writing\n");
return;
}
for(f=0; f<12;f++){
fprintf(fp, "\n\n***************** field # %d *************",f+1);
for (i=halo; i<nz-halo; i++) {
// new page
fprintf(fp, "\n***************** slice # %d *************\n",i+1-halo);
for (j=halo; j<ny-halo; j++) {
for (k=halo; k<nx-halo; k++) {
idx = 2*( (i*ny+j)*nx + k + f*nx*ny*nz);
fprintf(fp, "%+.4e %+.4e ", array[idx], array[idx+1]);
}
// new line
fprintf(fp, "\n");
}
}
}
fclose(fp);
}
|
3d25pt_var.c
|
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#if defined(_OPENMP)
#include <omp.h> /* for omp_get_max_threads() used below */
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
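/* Minimal usage sketch (illustrative only) for the helper above: */
#if 0
struct timeval t0, t1, dt;
gettimeofday(&t0, 0);
/* ... code under test ... */
gettimeofday(&t1, 0);
timeval_subtract(&dt, &t1, &t0);
printf("elapsed: %f s\n", dt.tv_sec + dt.tv_usec * 1.0e-6);
#endif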
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 8, Ny = 8, Nz = 8, Nt = 1; /* defaults (halo only) if not given on the command line */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (m = 0; m < 2; m++) { /* initialize both buffers, including the halo read by the stencil */
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Additions: 24 && Multiplications: 13 per stencil update
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_binop__plus_int64.c
|
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int64)
// A*D function (colscale): GB (_AxD__plus_int64)
// D*A function (rowscale): GB (_DxB__plus_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int64)
// C=scalar+B GB (_bind1st__plus_int64)
// C=scalar+B' GB (_bind1st_tran__plus_int64)
// C=A+scalar GB (_bind2nd__plus_int64)
// C=A'+scalar GB (_bind2nd_tran__plus_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT64 || GxB_NO_PLUS_INT64)
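// Illustrative expansion (not part of the generated code): with the macros
// above, one inner step of an eWise kernel for C=A+B reduces to:
//
//      int64_t aij = GBX (Ax, pA, A_iso) ;   // GB_GETA (aij,Ax,pA,A_iso)
//      int64_t bij = GBX (Bx, pB, B_iso) ;   // GB_GETB (bij,Bx,pB,B_iso)
//      Cx [p] = (aij + bij) ;                // GB_BINOP (GB_CX (p), aij, bij, i, j)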
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__plus_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_v1_reduction_2.c
|
/* A kernel with a two-level parallelizable loop and a reduction */
#include <omp.h>
float u[100][100];
float foo()
{
int i;
int j;
float temp;
float error = 0.0f; /* the reduction needs a defined initial value */
#pragma omp parallel for private (temp,i,j) reduction (+:error)
for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (temp,j) reduction (+:error)
for (j = 0; j <= 99; j += 1) {
temp = u[i][j];
error = error + temp * temp;
}
}
return error;
}
|
composite.c
|
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
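/*
Hypothetical usage sketch, based only on the signature documented above:
composite an overlay over a canvas at offset (+10,+20), clipped to self.
*/
#if 0
if (CompositeImage(canvas, overlay, OverCompositeOp, MagickTrue, 10, 20,
exception) == MagickFalse)
{ /* handle the error reported in 'exception' */ }
#endif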
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color multiplied by Source alpha
Dca = Dc*Da normalized Dest color multiplied by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the formulas below as 'gamma', the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also state that Mathematical Composition
methods should use an 'Over' blending mode for the Alpha Channel.
It was, however, not applied for the composition modes 'Plus', 'Minus',
and the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words, the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
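/*
A minimal sketch (names illustrative, not MagickCore API) of the 'over'
blend (X=1,Y=1,Z=1) for one channel, following the formulas above; all
inputs are normalized to [0,1] and colors are premultiplied by alpha.
*/
#if 0
static double composite_over_channel(const double Sca,const double Sa,
const double Dca,const double Da)
{
double gamma = Sa+Da-Sa*Da;           /* Da' for the over blend */
double Dca_prime = Sca+Dca*(1.0-Sa);  /* premultiplied result color */
return(gamma <= 0.0 ? 0.0 : Dca_prime/gamma);  /* Dc' = Dca'/Da' */
}
#endif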
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
const MagickRealType luma,MagickRealType *red,MagickRealType *green,
MagickRealType *blue)
{
MagickRealType
b,
c,
g,
h,
m,
r,
x;
/*
Convert HCL to RGB colorspace.
*/
assert(red != (MagickRealType *) NULL);
assert(green != (MagickRealType *) NULL);
assert(blue != (MagickRealType *) NULL);
h=6.0*hue;
c=chroma;
x=c*(1.0-fabs(fmod(h,2.0)-1.0));
r=0.0;
g=0.0;
b=0.0;
if ((0.0 <= h) && (h < 1.0))
{
r=c;
g=x;
}
else
if ((1.0 <= h) && (h < 2.0))
{
r=x;
g=c;
}
else
if ((2.0 <= h) && (h < 3.0))
{
g=c;
b=x;
}
else
if ((3.0 <= h) && (h < 4.0))
{
g=x;
b=c;
}
else
if ((4.0 <= h) && (h < 5.0))
{
r=x;
b=c;
}
else
if ((5.0 <= h) && (h < 6.0))
{
r=c;
b=x;
}
m=luma-(0.298839*r+0.586811*g+0.114350*b);
*red=QuantumRange*(r+m);
*green=QuantumRange*(g+m);
*blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
MagickRealType *luma)
{
MagickRealType
b,
c,
g,
h,
max,
r;
/*
Convert RGB to HCL colorspace.
*/
assert(hue != (MagickRealType *) NULL);
assert(chroma != (MagickRealType *) NULL);
assert(luma != (MagickRealType *) NULL);
r=red;
g=green;
b=blue;
max=MagickMax(r,MagickMax(g,b));
c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
h=0.0;
if (c == 0)
h=0.0;
else
if (red == max)
h=fmod((g-b)/c+6.0,6.0);
else
if (green == max)
h=((b-r)/c)+2.0;
else
if (blue == max)
h=((r-g)/c)+4.0;
*hue=(h/6.0);
*chroma=QuantumScale*c;
*luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
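/*
Sketch (illustrative): the two helpers above are approximate inverses;
converting a quantum-scaled RGB triple to HCL and back should reproduce
the original channel values up to rounding.
*/
#if 0
MagickRealType hue, chroma, luma, r, g, b;
CompositeHCL(red, green, blue, &hue, &chroma, &luma);
HCLComposite(hue, chroma, luma, &r, &g, &b);  /* r ~ red, g ~ green, b ~ blue */
#endif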
static MagickBooleanType CompositeOverImage(Image *image,
const Image *source_image,const MagickBooleanType clip_to_self,
const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*image_view,
*source_view;
const char
*value;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Composite image.
*/
status=MagickTrue;
progress=0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
alpha=Sa+Da-Sa*Da;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
pixel=QuantumRange*alpha;
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Sc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
gamma=PerceptibleReciprocal(alpha);
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling.
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
The user's input sigma now needs to be converted to the EWA ellipse
size. The filter defaults to a sigma of 0.5, so to match the user's
input the ellipse size needs to be doubled.
*/
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
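/* For example, compose:args = "2.5x1.5" appears to give width=5.0 and
height=3.0 here: user sigmas of 2.5 and 1.5 doubled to EWA ellipse axes. */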
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
/* Otherwise let's set an angle range and calculate in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a Gaussian cylindrical filter for EWA blurring. Since the
minimum ellipse radius is support*1.0, the EWA algorithm can only
produce a minimum blur of 0.5 for a Gaussian (support=2.0), which
means that even 'No Blur' will still be a little blurry! The
solution (which also avoids any user expert filter settings) is to
set our own filter settings here, then restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/* do the variable blurring of each pixel in image */
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs((double) angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
(void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1,
blur.x2,blur.y1, blur.y2);
(void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale*
GetPixelRed(p),QuantumScale*GetPixelGreen(p));
}
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
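/* Illustrative reading of the code below: a full-scale red value moves a
pixel by about +x_scale columns and a full-scale green value by about
+y_scale rows, since each channel is recentered around the quantum
midpoint before scaling. */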
canvas_image=CloneImage(image,0,0,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine the fixed center point for the absolute distortion map.
Absolute distort == displace offset relative to a fixed absolute point.
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentages relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided
displacement/distortion map -- like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
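/* e.g. compose:args = "50" appears to dissolve the source to 50% over an
intact canvas; "30x80" uses 30% source over an 80% canvas; values above
100 fade the canvas instead (see the clamping below). */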
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from the "compose:args" setting.
Unused values are set to zero automagically.
Arguments are normally a comma-separated list, so this probably should
be changed to some 'general comma list' parser (with a minimum
number of values).
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case LightenCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ScreenCompositeOp:
case SoftLightCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs(Sa-Da);
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
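        /*
          Note: most of the cases below are instances of the standard SVG
          compositing identity (the same formula the Mathematics operator
          comment applies further down):
            Dca' = f(Sc,Dc)*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa)
          For most operators gamma, selected next, divides this
          premultiplied result by the composite alpha to recover a straight
          (non-premultiplied) color.
        */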
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
                OR a greyscale version of a binary 'And'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case LinearBurnCompositeOp:
{
/*
              LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
              LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
      case LightenCompositeOp:
      {
        /*
          Lighten is equivalent to a 'Maximum' method
            OR a greyscale version of a binary 'Or'
            OR the 'Union' of pixel sets.
        */
        if ((Sca*Da) > (Dca*Sa))
          {
            pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
            break;
          }
        pixel=QuantumRange*(Dca+Sca*(1.0-Da));
        break;
      }
      case LightenIntensityCompositeOp:
      {
        pixel=Sa*GetPixelIntensity(source_image,p) >
          Da*GetPixelIntensity(image,q) ? Sc : Dc;
        break;
      }
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
              'Mathematics' is a free-form, user-controlled mathematical
              composition defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
              as a comma-separated 'geometry' string in the "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
            f(Sc,Dc) = Dc - Sc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sca);
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
  if (canvas_image != (Image *) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace,exception);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
exception);
status=MagickTrue;
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) ||
(image->alpha_trait != UndefinedPixelTrait) ||
(texture_image->alpha_trait != UndefinedPixelTrait)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
thread_status=CompositeImage(image,texture_image,image->compose,
MagickTrue,x+texture_image->tile_offset.x,y+
texture_image->tile_offset.y,exception);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(texture_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p,
*pixels;
register ssize_t
x;
register Quantum
*q;
size_t
width;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
(y+texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
register ssize_t
j;
p=pixels;
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
for (j=0; j < (ssize_t) width; j++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(texture_image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(texture_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
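/*
  Usage sketch for TextureImage() (illustrative only; the file names are
  hypothetical and error handling/cleanup is elided):

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
    (void) CopyMagickString(image_info->filename,"canvas.png",
      MagickPathExtent);
    Image *image = ReadImage(image_info,exception);
    (void) CopyMagickString(image_info->filename,"granite.png",
      MagickPathExtent);
    Image *texture = ReadImage(image_info,exception);
    if (TextureImage(image,texture,exception) == MagickFalse)
      CatchException(exception);
*/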
|
test.c
|
#include <stdio.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#define N 100
int main()
{
check_offloading();
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
  #pragma omp target map(tofrom: a[0:N])
{
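    // "teams" launches a league of teams on the device; "distribute simd"
    // splits the loop iterations across those teams and vectorizes each
    // team's chunk (no "parallel for", so each team contributes one thread).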
#pragma omp teams
#pragma omp distribute simd
for(int k=0; k<N; k++)
a[k] = k;
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return 0;
}
}
// report
printf("done with %d errors\n", error);
return error;
}
|
tree.h
|
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
  * \param max_leaves The maximum number of leaves
* \param track_branch_features Whether to keep track of ancestors of leaf nodes
* \param is_linear Whether the tree has linear models at each leaf
*/
explicit Tree(int max_leaves, bool track_branch_features, bool is_linear);
/*!
* \brief Constructor, from a string
* \param str Model string
  * \param used_len Used count of str (number of characters consumed)
*/
Tree(const char* str, size_t* used_len);
virtual ~Tree() noexcept = default;
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
  * \param left_value Model output of the left child
  * \param right_value Model output of the right child
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight,
float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
  * \param left_value Model output of the left child
  * \param right_value Model output of the right child
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = MaybeRoundToZero(output);
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
virtual void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
virtual void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Get upper bound leaf value of this tree model
*/
double GetUpperBoundValue() const;
/*!
* \brief Get lower bound leaf value of this tree model
*/
double GetLowerBoundValue() const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values,
int num_features, std::unordered_map<int, double>* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get parent of specific leaf*/
inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; }
/*! \brief Get feature of specific split (original feature index)*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature_inner(int split_idx) const { return split_feature_inner_[split_idx]; }
/*! \brief Get features on leaf's branch*/
inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; }
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
inline double internal_value(int node_idx) const {
return internal_value_[node_idx];
}
inline bool IsNumericalSplit(int node_idx) const {
return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
}
inline int left_child(int node_idx) const { return left_child_[node_idx]; }
inline int right_child(int node_idx) const { return right_child_[node_idx]; }
inline uint32_t threshold_in_bin(int node_idx) const {
return threshold_in_bin_[node_idx];
}
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
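  /*!
  * \brief Node indexing convention: non-negative values index internal
  * nodes, while a leaf is referenced by the bitwise complement of its index
  * (node = ~leaf); data_count() above and Split() below rely on this
  * encoding to tell the two apart.
  */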
/*!
* \brief Shrinkage for the tree's output
  * shrinkage rate (a.k.a. learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
virtual inline void Shrinkage(double rate) {
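    // A tree with num_leaves_ leaves has num_leaves_ - 1 internal nodes, so
    // the loop below scales leaf/internal pairs together; the last leaf,
    // which has no paired internal node, is scaled separately afterwards.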
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
if (is_linear_) {
leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] * rate);
for (size_t j = 0; j < leaf_coeff_[i].size(); ++j) {
leaf_coeff_[i][j] = MaybeRoundToZero(leaf_coeff_[i][j] * rate);
}
}
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
if (is_linear_) {
leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] * rate);
for (size_t j = 0; j < leaf_coeff_[num_leaves_ - 1].size(); ++j) {
leaf_coeff_[num_leaves_ - 1][j] = MaybeRoundToZero(leaf_coeff_[num_leaves_ - 1][j] * rate);
}
}
shrinkage_ *= rate;
}
inline double shrinkage() const { return shrinkage_; }
virtual inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
if (is_linear_) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] + val);
}
leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] + val);
}
// force to 1.0
shrinkage_ = 1.0f;
}
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
if (is_linear_) {
leaf_const_[0] = val;
}
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize linear model of tree node to json*/
std::string LinearModelToJSON(int index) const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
inline static bool IsZero(double fval) {
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
inline static double MaybeRoundToZero(double fval) {
return IsZero(fval) ? 0 : fval;
}
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
inline static int8_t GetMissingType(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
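  /*!
  * \brief decision_type_ bit layout, as implied by the helpers above: bit 0
  * (kCategoricalMask) flags a categorical split, bit 1 (kDefaultLeftMask)
  * sends missing values to the left child, and bits 2-3 hold the
  * MissingType accessed via GetMissingType()/SetMissingType().
  */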
void RecomputeMaxDepth();
int NextLeafId() const { return num_leaves_; }
/*! \brief Get the linear model constant term (bias) of one leaf */
inline double LeafConst(int leaf) const { return leaf_const_[leaf]; }
/*! \brief Get the linear model coefficients of one leaf */
inline std::vector<double> LeafCoeffs(int leaf) const { return leaf_coeff_[leaf]; }
/*! \brief Get the linear model features of one leaf */
inline std::vector<int> LeafFeaturesInner(int leaf) const {return leaf_features_inner_[leaf]; }
/*! \brief Get the linear model features of one leaf */
inline std::vector<int> LeafFeatures(int leaf) const {return leaf_features_[leaf]; }
/*! \brief Set the linear model coefficients on one leaf */
inline void SetLeafCoeffs(int leaf, const std::vector<double>& output) {
leaf_coeff_[leaf].resize(output.size());
for (size_t i = 0; i < output.size(); ++i) {
leaf_coeff_[leaf][i] = MaybeRoundToZero(output[i]);
}
}
/*! \brief Set the linear model constant term (bias) on one leaf */
inline void SetLeafConst(int leaf, double output) {
leaf_const_[leaf] = MaybeRoundToZero(output);
}
/*! \brief Set the linear model features on one leaf */
inline void SetLeafFeaturesInner(int leaf, const std::vector<int>& features) {
leaf_features_inner_[leaf] = features;
}
/*! \brief Set the linear model features on one leaf */
inline void SetLeafFeatures(int leaf, const std::vector<int>& features) {
leaf_features_[leaf] = features;
}
inline bool is_linear() const { return is_linear_; }
#ifdef USE_CUDA_EXP
inline bool is_cuda_tree() const { return is_cuda_tree_; }
#endif // USE_CUDA_EXP
inline void SetIsLinear(bool is_linear) {
is_linear_ = is_linear;
}
protected:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
inline int NumericalDecision(double fval, int node) const {
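    // NaN is coerced to 0.0 unless this node's declared missing type is
    // NaN; a NaN feature value therefore takes the default branch when the
    // missing type is Zero, and is otherwise routed like an ordinary zero.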
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval) && missing_type != MissingType::NaN) {
fval = 0.0f;
}
if ((missing_type == MissingType::Zero && IsZero(fval))
|| (missing_type == MissingType::NaN && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == MissingType::Zero && fval == default_bin)
|| (missing_type == MissingType::NaN && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int CategoricalDecision(double fval, int node) const {
int int_fval;
if (std::isnan(fval)) {
return right_child_[node];
} else {
int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];
}
}
int cat_idx = static_cast<int>(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int Decision(double fval, int node) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecision(fval, node);
} else {
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecisionInner(fval, node);
} else {
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
  /*! \brief This is used to fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
    // note that pweight is included for convenience and is not tied to the other attributes;
    // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values,
std::unordered_map<int, double>* phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
  /*! \brief Determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
  /*! \brief Maximum number of leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief weight of leaves */
std::vector<double> leaf_weight_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief weight of non-leaf nodes */
std::vector<double> internal_weight_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
/*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */
bool track_branch_features_;
/*! \brief Features on leaf's branch, original index */
std::vector<std::vector<int>> branch_features_;
double shrinkage_;
int max_depth_;
/*! \brief Tree has linear model at each leaf */
bool is_linear_;
/*! \brief coefficients of linear models on leaves */
std::vector<std::vector<double>> leaf_coeff_;
/*! \brief constant term (bias) of linear models on leaves */
std::vector<double> leaf_const_;
/* \brief features used in leaf linear models; indexing is relative to num_total_features_ */
std::vector<std::vector<int>> leaf_features_;
/* \brief features used in leaf linear models; indexing is relative to used_features_ */
std::vector<std::vector<int>> leaf_features_inner_;
#ifdef USE_CUDA_EXP
/*! \brief Marks whether this tree is a CUDATree */
bool is_cuda_tree_;
#endif // USE_CUDA_EXP
};
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = gain;
// add two new leaves
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_weight_[new_node_idx] = leaf_weight_[leaf];
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_weight_[leaf] = left_weight;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_weight_[num_leaves_] = right_weight;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
if (track_branch_features_) {
branch_features_[num_leaves_] = branch_features_[leaf];
branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]);
branch_features_[leaf].push_back(split_feature_[new_node_idx]);
}
}
inline double Tree::Predict(const double* feature_values) const {
if (is_linear_) {
int leaf = (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
double output = leaf_const_[leaf];
bool nan_found = false;
for (size_t i = 0; i < leaf_features_[leaf].size(); ++i) {
int feat_raw = leaf_features_[leaf][i];
double feat_val = feature_values[feat_raw];
if (std::isnan(feat_val)) {
nan_found = true;
break;
} else {
output += leaf_coeff_[leaf][i] * feat_val;
}
}
if (nan_found) {
return LeafOutput(leaf);
} else {
return output;
}
} else {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
}
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
if (is_linear_) {
int leaf = (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
double output = leaf_const_[leaf];
bool nan_found = false;
for (size_t i = 0; i < leaf_features_[leaf].size(); ++i) {
int feat = leaf_features_[leaf][i];
auto val_it = feature_values.find(feat);
if (val_it != feature_values.end()) {
double feat_val = val_it->second;
if (std::isnan(feat_val)) {
nan_found = true;
break;
} else {
output += leaf_coeff_[leaf][i] * feat_val;
}
}
}
if (nan_found) {
return LeafOutput(leaf);
} else {
return output;
}
} else {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
}
inline int Tree::PredictLeafIndex(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return leaf;
} else {
return 0;
}
}
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return leaf;
} else {
return 0;
}
}
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values,
int num_features, std::unordered_map<int, double>* output) {
(*output)[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
inline void Tree::RecomputeLeafDepths(int node, int depth) {
if (node == 0) leaf_depth_.resize(num_leaves());
if (node < 0) {
leaf_depth_[~node] = depth;
} else {
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values[split_feature_[node]], node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values[split_feature_[node]], node);
}
}
return ~node;
}
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
}
return ~node;
}
} // namespace LightGBM
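/* Usage sketch (illustrative only; the constructor arguments and feature
   values below are hypothetical, and a real tree is grown through Split()
   calls during training rather than arriving ready-made):

     LightGBM::Tree tree(31, false, false); // max_leaves, track_branch_features, is_linear
     // ... training code grows the tree via tree.Split(...) ...
     double row[3] = {0.5, 1.2, -3.0};
     double score = tree.Predict(row);      // raw output of the leaf this row reaches
     int leaf = tree.PredictLeafIndex(row); // index of that leaf
*/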
#endif // LightGBM_TREE_H_
|
VolumetricConvolutionMM.c
|
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricConvolutionMM.c"
#else
static void inline THNN_(VolumetricConvolutionMM_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *weight,
THTensor *bias,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH) {
THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
THArgCheck(kT > 0 && kW > 0 && kH > 0, 8,
"kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW);
int ndim = input->nDimension;
int dimf = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5)
{
dimf++;
dimt++;
dimh++;
dimw++;
}
long nInputPlane;
long inputDepth;
long inputHeight;
long inputWidth;
long nOutputPlane;
long outputDepth;
long outputHeight;
long outputWidth;
nInputPlane = input->size[dimf];
inputDepth = input->size[dimt];
inputHeight = input->size[dimh];
inputWidth = input->size[dimw];
nOutputPlane = weight->size[0];
outputDepth = (inputDepth + 2*pT - kT) / dT + 1;
outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
outputWidth = (inputWidth + 2*pW - kW) / dW + 1;
if (outputWidth < 1 || outputHeight < 1 || outputDepth < 1)
{
THError(
"Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
nInputPlane, inputDepth, inputHeight, inputWidth,
nOutputPlane, outputDepth, outputHeight, outputWidth
);
}
THArgCheck(weight->nDimension == 2 || weight->nDimension == 5, 4,
"weight tensor should be 2D or 5D - got %d", weight->nDimension);
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
}
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
}
}
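/* view_weight flattens a contiguous 5D weight tensor of shape
   (nOutputPlane x nInputPlane x kT x kH x kW) into a 2D view of shape
   (nOutputPlane x nInputPlane*kT*kH*kW), so the convolution reduces to a
   single matrix multiply against the unfolded input (see the addmm call in
   updateOutput_frame below). */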
static THTensor* THNN_(view_weight)(THTensor *weight)
{
weight = THTensor_(newContiguous)(weight);
if (weight->nDimension == 5) {
long s1 = weight->size[0];
long s2 = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4];
THTensor *old_weight = weight;
weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
s1, -1, s2, -1);
THTensor_(free)(old_weight);
}
return weight;
}
/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
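/* (Specifically: different kernel offsets of the same input plane accumulate
   into overlapping input locations through THVector_(cadd), so only the
   outer per-plane loop is race-free, whereas unfolded_copy can parallelize
   over every (plane, kernel-offset) combination.) */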
static void THNN_(unfolded_acc_vol)(
THTensor *finput,
THTensor *input,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
long nInputPlane,
long inputDepth,
long inputWidth,
long inputHeight,
long outputDepth,
long outputWidth,
long outputHeight)
{
long nip;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
//#pragma omp parallel for private(nip)
for (nip = 0; nip < nInputPlane; nip++)
{
long kt, kw, kh, t, y, x, it, ix, iy;
for (kt = 0; kt < kT; kt++)
{
for (kh = 0; kh < kH; kh++)
{
for (kw = 0; kw < kW; kw++)
{
real *src = finput_data
+ nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
+ kt * (kH*kW*outputDepth*outputHeight*outputWidth)
+ kh * (kW*outputDepth*outputHeight*outputWidth)
+ kw * (outputDepth*outputHeight*outputWidth);
real *dst = input_data + nip*(inputDepth*inputHeight*inputWidth);
if (pT > 0 || pH > 0 || pW > 0)
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT - pT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH - pH + kh;
for (x = 0; x < outputWidth; x++)
{
ix = x*dW - pW + kw;
              if (it >= 0 && it < inputDepth && iy >= 0 && iy < inputHeight && ix >= 0 && ix < inputWidth)
              {
                real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
                THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
              }
}
}
}
}
else
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH + kh;
for(x = 0; x < outputWidth; x++)
{
ix = x*dW + kw;
real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
}
}
}
}
}
}
}
}
}
static void THNN_(unfolded_copy_vol)(
THTensor *finput,
THTensor *input,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
long nInputPlane,
long inputDepth,
long inputWidth,
long inputHeight,
long outputDepth,
long outputWidth,
long outputHeight)
{
long k;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
// #pragma omp parallel for private(k)
for (k = 0; k < nInputPlane*kT*kH*kW; k++)
{
long nip = k / (kT*kH*kW);
long rest = k % (kT*kH*kW);
long kt = rest / (kH*kW);
rest = rest % (kH*kW);
long kh = rest / kW;
long kw = rest % kW;
long t,x,y,it,ix,iy;
real *dst = finput_data
+ nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
+ kt * (kH*kW*outputDepth*outputHeight*outputWidth)
+ kh * (kW*outputDepth*outputHeight*outputWidth)
+ kw * (outputDepth*outputHeight*outputWidth);
real *src = input_data + nip*(inputDepth*inputHeight*inputWidth);
if (pT > 0 || pH > 0 || pW > 0)
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT - pT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH - pH + kh;
for (x = 0; x < outputWidth; x++)
{
ix = x*dW - pW + kw;
if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
memset(dst+t*outputHeight*outputWidth+y*outputWidth+x, 0, sizeof(real)*(1));
else
memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
}
}
}
}
else
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH + kh;
for(x = 0; x < outputWidth; x++)
{
ix = x*dW + kw;
memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
}
}
}
}
}
}
static void THNN_(VolumetricConvolutionMM_updateOutput_frame)(
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
long nInputPlane,
long inputDepth,
long inputWidth,
long inputHeight,
long nOutputPlane,
long outputDepth,
long outputWidth,
long outputHeight)
{
long i;
THTensor *output2d;
THNN_(unfolded_copy_vol)(
finput, input,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
nInputPlane,
inputDepth, inputWidth, inputHeight,
outputDepth, outputWidth, outputHeight
);
output2d = THTensor_(newWithStorage2d)(
output->storage, output->storageOffset, nOutputPlane, -1,
outputDepth*outputHeight*outputWidth, -1
);
if (bias) {
for (i = 0; i < nOutputPlane; i++)
{
THVector_(fill)(
output->storage->data+output->storageOffset+output->stride[0]*i,
THTensor_(get1d)(bias, i),
outputDepth*outputHeight*outputWidth
);
}
} else {
THTensor_(zero)(output);
}
THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);
THTensor_(free)(output2d);
}
void THNN_(VolumetricConvolutionMM_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimf = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
long nInputPlane;
long inputDepth;
long inputHeight;
long inputWidth;
long nOutputPlane;
long outputDepth;
long outputHeight;
long outputWidth;
THNN_(VolumetricConvolutionMM_shapeCheck)(
state, input, NULL, weight, bias,
kT, kW, kH, dT, dW, dH, pT, pW, pH);
input = THTensor_(newContiguous)(input);
if (input->nDimension == 5)
{
dimf++;
dimt++;
dimh++;
dimw++;
}
nInputPlane = input->size[dimf];
inputDepth = input->size[dimt];
inputHeight = input->size[dimh];
inputWidth = input->size[dimw];
nOutputPlane = weight->size[0];
outputDepth = (inputDepth + 2*pT - kT) / dT + 1;
outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
outputWidth = (inputWidth + 2*pW - kW) / dW + 1;
weight = THNN_(view_weight)(weight);
if (input->nDimension == 4)
{
THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth);
THNN_(VolumetricConvolutionMM_updateOutput_frame)(
input, output, weight, bias, finput,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
nInputPlane, inputDepth, inputWidth, inputHeight,
nOutputPlane, outputDepth, outputWidth, outputHeight
);
}
else
{
long T = input->size[0];
long t;
THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth);
// #pragma omp parallel for private(t)
for (t = 0; t < T; t++)
{
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(VolumetricConvolutionMM_updateOutput_frame)(
input_t, output_t, weight, bias, finput_t,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
nInputPlane, inputDepth, inputWidth, inputHeight,
nOutputPlane, outputDepth, outputWidth, outputHeight
);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(weight);
}
static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
THTensor *gradInput,
THTensor *gradOutput,
THTensor *weight,
THTensor *fgradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
);
THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
THTensor_(free)(gradOutput2d);
THTensor_(zero)(gradInput);
THNN_(unfolded_acc_vol)(
fgradInput, gradInput,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
gradInput->size[0], gradInput->size[1], gradInput->size[3], gradInput->size[2],
gradOutput->size[1], gradOutput->size[3], gradOutput->size[2]
);
}
void THNN_(VolumetricConvolutionMM_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *weight,
THTensor *finput,
THTensor *fgradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int nOutputPlane = (int)weight->size[0];
THNN_(VolumetricConvolutionMM_shapeCheck)(
state, input, gradOutput, weight, NULL,
kT, kW, kH, dT, dW, dH, pT, pW, pH);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
weight = THNN_(view_weight)(weight);
THTensor_(resizeAs)(gradInput, input);
THTensor_(resizeAs)(fgradInput, finput);
// depending on the BLAS library, fgradInput (result tensor) might
// be left uninitialized on zero alpha, which might lead to weird behavior
// hence, to be safe, zero it
THTensor_(zero)(fgradInput);
THTensor *tweight = THTensor_(new)();
THTensor_(transpose)(tweight, weight, 0, 1);
if (input->nDimension == 4)
{
THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
gradInput, gradOutput, tweight, fgradInput,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH
);
}
else
{
long T = input->size[0];
long t;
//#pragma omp parallel for private(t)
for (t = 0; t < T; t++)
{
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
gradInput_t, gradOutput_t, tweight, fgradInput_t,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH
);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
THTensor_(free)(fgradInput_t);
}
}
THTensor_(free)(tweight);
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(weight);
}
static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
real scale)
{
long i;
THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
);
THTensor *tfinput = THTensor_(new)();
THTensor_(transpose)(tfinput, finput, 0, 1);
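// gradWeight += scale * gradOutput2d * finput^T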
THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
THTensor_(free)(tfinput);
if (gradBias) {
for (i = 0; i < gradBias->size[0]; i++)
{
long k;
real sum = 0;
real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
for (k = 0; k < gradOutput2d->size[1]; k++)
sum += data[k];
(gradBias->storage->data + gradBias->storageOffset)[i] += scale * sum;
}
}
THTensor_(free)(gradOutput2d);
}
void THNN_(VolumetricConvolutionMM_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
accreal scale_)
{
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int nOutputPlane = (int)gradWeight->size[0];
THNN_(VolumetricConvolutionMM_shapeCheck)(
state, input, gradOutput, gradWeight, gradBias,
kT, kW, kH, dT, dW, dH, pT, pW, pH);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
gradWeight = THNN_(view_weight)(gradWeight);
if (input->nDimension == 4) // non-batch mode
{
THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
}
else // batch mode
{
long T = input->size[0];
long t;
for (t = 0; t < T; t++)
{
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(gradWeight);
}
#endif
|
openmp_sendrecv.c
|
/*
(C) 2007 by Argonne National Laboratory.
See COPYRIGHT in top-level directory.
*/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define BUFLEN 512
#define NTIMES 50
#define MAX_THREADS 10
/*
Concurrent send and recv by multiple threads on each process.
*/
void *thd_sendrecv( void * );
void *thd_sendrecv( void *comm_ptr )
{
MPI_Comm comm;
int my_rank, num_procs, next, buffer_size, namelen, idx;
char buffer[BUFLEN], processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Status status;
comm = *(MPI_Comm *) comm_ptr;
MPI_Comm_size( comm, &num_procs );
MPI_Comm_rank( comm, &my_rank );
MPI_Get_processor_name( processor_name, &namelen );
fprintf( stdout, "Process %d on %s\n", my_rank, processor_name );
strcpy( buffer, "hello there" );
buffer_size = strlen(buffer)+1;
if ( my_rank == num_procs-1 )
next = 0;
else
next = my_rank+1;
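/* Token-ring exchange: rank 0 initiates the send to the next rank; every
other rank receives first, then forwards. The MPI_PROC_NULL send/recv
pairs below are no-ops that merely exercise the threaded MPI path. */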
for ( idx = 0; idx < NTIMES; idx++ ) {
if (my_rank == 0) {
/*
printf("%d sending '%s' \n",my_rank,buffer);
*/
MPI_Send(buffer, buffer_size, MPI_CHAR, next, 99, comm);
MPI_Send(buffer, buffer_size, MPI_CHAR, MPI_PROC_NULL, 299, comm);
/*
printf("%d receiving \n",my_rank);
*/
MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99,
comm, &status);
/*
printf("%d received '%s' \n",my_rank,buffer);
*/
}
else {
/*
printf("%d receiving \n",my_rank);
*/
MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99,
comm, &status);
MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_PROC_NULL, 299,
comm, &status);
/*
printf("%d received '%s' \n",my_rank,buffer);
*/
MPI_Send(buffer, buffer_size, MPI_CHAR, next, 99, comm);
/*
printf("%d sent '%s' \n",my_rank,buffer);
*/
}
/* MPI_Barrier(comm); */
}
return 0;
}
int main( int argc, char *argv[] )
{
MPI_Comm comm[ MAX_THREADS ];
int my_rank, ii, provided;
int num_threads;
MPI_Init_thread( &argc, &argv, MPI_THREAD_MULTIPLE, &provided );
if ( provided != MPI_THREAD_MULTIPLE ) {
fprintf( stderr, "Aborting, MPI_THREAD_MULTIPLE is needed...\n" );
fflush( stderr );
MPI_Abort( MPI_COMM_WORLD, 1 );
}
MPI_Comm_rank( MPI_COMM_WORLD, &my_rank );
if ( my_rank == 0 ) {
if ( argc != 2 ) {
fprintf( stderr, "Error: %s num_threads\n", argv[0] );
fflush( stderr );
MPI_Abort( MPI_COMM_WORLD, 1 );
}
num_threads = atoi( argv[1] );
if ( num_threads < 1 ) {
fprintf( stderr, "Error: Input num_threads=%d < 1 \n",
num_threads );
fflush( stderr );
MPI_Abort( MPI_COMM_WORLD, 1 );
}
if ( num_threads > MAX_THREADS ) {
fprintf( stderr, "Error: Input num_threads=%d < %d \n",
num_threads, MAX_THREADS );
fflush( stderr );
MPI_Abort( MPI_COMM_WORLD, 1 );
}
}
/* every rank, including rank 0, joins the same broadcast */
MPI_Bcast( &num_threads, 1, MPI_INT, 0, MPI_COMM_WORLD );
MPI_Barrier( MPI_COMM_WORLD );
for ( ii=0; ii < num_threads; ii++ ) {
MPI_Comm_dup( MPI_COMM_WORLD, &comm[ii] );
}
#pragma omp parallel shared( num_threads ) private( ii )
#pragma omp for
for ( ii=0; ii < num_threads; ii++ ) {
thd_sendrecv( (void *) &comm[ii] );
}
MPI_Finalize();
return 0;
}
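/*
Build/run sketch (assumes an MPI implementation that supports
MPI_THREAD_MULTIPLE and an OpenMP-capable compiler; command names
are typical, not guaranteed):
mpicc -fopenmp openmp_sendrecv.c -o openmp_sendrecv
mpiexec -n 4 ./openmp_sendrecv 4
*/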
|
KMeans.h
|
#pragma once
#include <cmath>
#include <iostream>
#include <memory>
#include <vector>
#include "headers/Matrix.h"
#include "ClosestCentroids.h"
template<typename T>
class KMeans{
public:
KMeans(const Matrix<T>& dataset, int n_clusters, bool stop_criterion=true, int n_threads=1);
Matrix<T> getCentroid();
Matrix<int> getDataToCentroid();
int getNIters();
void mapSampleToCentroid();
void updateCentroids();
void run(int max_iter, float threshold=-1);
void print();
private:
bool _stop_crit;
int _n_threads;
int _n_iters = 0;
/**
* number of features of the dataset (x0, x1, ..., xn)
*/
int _dims;
/**
* number of training samples
*/
int _samples;
/**
* desired number of clusters
*/
int _n_clusters;
/**
* K cluster centroids µ1, ..., µK. NxK matrix
* where:
* N: number of dimensions
* K: number of classes/clusters
*/
std::unique_ptr<Matrix<T>> _centroids;
/**
* By convention, _training_set is a NxM matrix
* where:
* N: number of dimensions
* M: number of training samples
*/
Matrix<T> _training_set;
/**
* M centroid indices mapping each training sample to its
* corresponding cluster. 1xM matrix
* where:
* M: number of samples
* element: index of the mapped cluster,
* training_index -> cluster_index
* note: M may grow as new samples are added;
* the class doesn't support that yet.
*
* stopping criterion idea:
* keep the assignments from step t-1
* and check for changes.
* If almost nothing changed -> stop the algorithm.
*/
std::unique_ptr<ClosestCentroids<T>> _dataset_to_centroids;
};
template<typename T>
KMeans<T>::KMeans(const Matrix<T>& dataset, int n_clusters, bool stop_criterion, int n_threads) :
_stop_crit{ stop_criterion },
_n_threads{ n_threads },
_n_clusters{ n_clusters },
_training_set{ dataset } {
_dims = dataset.getRows();
_samples = dataset.getCols();
_training_set.setThreads(_n_threads);
Matrix<T> vMinValues = _training_set.vMin();
Matrix<T> vMaxValues = _training_set.vMax();
_centroids = std::make_unique<Matrix<T>>(_dims, n_clusters, UNIFORM, vMinValues, vMaxValues);
_centroids->setThreads(_n_threads);
_dataset_to_centroids = std::make_unique<ClosestCentroids<T>>(_samples, 0, stop_criterion, _n_threads);
}
template<typename T>
inline Matrix<T> KMeans<T>::getCentroid(){ return *_centroids; }
template<typename T>
inline Matrix<int> KMeans<T>::getDataToCentroid(){ return *static_cast<Matrix<int>* >(_dataset_to_centroids.get()); }
template<typename T>
inline int KMeans<T>::getNIters(){ return _n_iters; }
template<typename T>
void KMeans<T>::mapSampleToCentroid(){ _dataset_to_centroids->getClosest(_training_set, *_centroids); }
template<typename T>
void KMeans<T>::updateCentroids(){
// number of points assigned to each cluster
std::vector<int> occurrences(_n_clusters, 0);
// accumulates the samples assigned to each cluster to compute the new positions
std::vector<T> sample_buff(_n_clusters*_dims, T(0));
for(int i = 0; i < _samples; ++i){
const int& k_index = (*_dataset_to_centroids)(i);
for(int d = 0; d < _dims; ++d){
sample_buff[k_index+d*_n_clusters] += _training_set(d, i);
}
++occurrences[k_index];
}
// new centroid = mean of its assigned samples; empty clusters keep their position
//#pragma omp parallel for num_threads(_n_threads)
for(int c = 0; c < _n_clusters; ++c){
if(!occurrences[c]) continue;
for(int d = 0; d < _dims; ++d){
(*_centroids)(d, c) = sample_buff[c+d*_n_clusters] / occurrences[c];
}
}
}
template<typename T>
void KMeans<T>::run(int max_iter, float threshold){
mapSampleToCentroid();
updateCentroids();
_n_iters = 1;
if(max_iter == 1) return;
int epoch = 1;
float modif_rate_prev = 0;
float modif_rate_curr;
float inertia;
do {
mapSampleToCentroid();
updateCentroids();
// rate of assignment changes reported by ClosestCentroids
modif_rate_curr = _dataset_to_centroids->getModifRate();
// "inertia": how much the change rate itself is still moving
inertia = modif_rate_curr - modif_rate_prev;
modif_rate_prev = modif_rate_curr;
++epoch;
} while(epoch < max_iter && modif_rate_curr >= threshold && std::abs(inertia) >= 1e-2);
_n_iters = epoch;
}
template<typename T>
void KMeans<T>::print() {
for(int d = 0; d < _dims; ++d){
std::cout << "[";
std::cout << _centroids->row(d) << "]," << std::endl;
}
}
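/*
Usage sketch (hypothetical; assumes Matrix<T> provides the UNIFORM
constructor used above, that the dataset is laid out NxM (features x
samples), and that getModifRate() returns the fraction of samples whose
assignment changed):

Matrix<float> dataset(2, 1000, UNIFORM, minVals, maxVals); // minVals/maxVals: per-feature bounds
KMeans<float> km(dataset, 3, true, 4); // 3 clusters, stop criterion on, 4 threads
km.run(100, 0.01f); // at most 100 epochs, stop when assignments barely change
Matrix<float> centroids = km.getCentroid(); // 2x3: one column per cluster
Matrix<int> labels = km.getDataToCentroid(); // 1x1000: cluster index per sample
*/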
|